Example No. 1
  bool MZTrafoModel::train( const CalibrationData& cd, MODELTYPE md, bool use_RANSAC, double rt_left /*= -std::numeric_limits<double>::max()*/, double rt_right /*= std::numeric_limits<double>::max() */ )
  {
    std::vector<double> obs_mz;
    std::vector<double> theo_mz;
    std::vector<double> weights;
    const CalibrationData* p_cd;
    CalibrationData cdm;
    Size i, ie; // CalibrationData's start-to-end interval
    if (cd.getNrOfGroups() > 0) // we have lock mass traces
    { // this is extra work, since we need to collect peak groups and compute the median
      cdm = cd.median(rt_left, rt_right);
      p_cd = &cdm;
      i = 0;
      ie = cdm.size();
    }
    else
    {
      i = std::distance(cd.begin(), lower_bound(cd.begin(), cd.end(), rt_left, RichPeak2D::RTLess()));
      ie = std::distance(cd.begin(), upper_bound(cd.begin(), cd.end(), rt_right, RichPeak2D::RTLess()));
      p_cd = &cd;
    }
    for (Size j = i; j != ie; ++j)
    {
      obs_mz.push_back(p_cd->getError(j)); // could be ppm or [Th], depending on cd::use_ppm_
      theo_mz.push_back(p_cd->getRefMZ(j));
      weights.push_back(p_cd->getWeight(j));
    }

    this->rt_ = (rt_left + rt_right) / 2;

    return (train(obs_mz, theo_mz, weights, md, use_RANSAC));
  }
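A minimal usage sketch (assuming the OpenMS API; the header paths, model type, and RT window values are illustrative):

#include <OpenMS/FILTERING/CALIBRATION/MZTrafoModel.h>   // assumed header locations
#include <OpenMS/DATASTRUCTURES/CalibrationData.h>

using namespace OpenMS;

CalibrationData cd;
// ... fill cd with observed m/z, reference m/z and weights ...
MZTrafoModel model;
// fit a linear model on the RT window [100, 200] without RANSAC
bool ok = model.train(cd, MZTrafoModel::LINEAR, /*use_RANSAC=*/false, 100.0, 200.0);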
Example No. 2
bool EyeTrackerCalibration::calibrate(const CalibrationData & calibrationData)
{
    if(calibrationData.size() == 0)
        return false;

    size_t totalCalibrationPointsNum = 0;

    for(const auto & p: calibrationData)
        totalCalibrationPointsNum += p.eyePositions.size();

    if(totalCalibrationPointsNum < 10)
        return false;

    std::vector<cv::Point2d> eyePositions;
    std::vector<cv::Point2d> screenPositions;

    if(m_dataPreprocessingType == NoPreprocessing)
    {
        eyePositions.resize(totalCalibrationPointsNum);
        screenPositions.resize(totalCalibrationPointsNum);

        size_t i = 0;
        for(const auto & p: calibrationData)
        {
            const auto screenPos = p.screenPoint;
            for(const auto & eyePos: p.eyePositions)
            {
                eyePositions[i] = eyePos;
                screenPositions[i] = screenPos;
                ++i;
            }
        }
    }
    else if(m_dataPreprocessingType == MeanPoint)
    {
        eyePositions.reserve(calibrationData.size());
        screenPositions.reserve(calibrationData.size());

        for(const auto & p: calibrationData)
        {
            if(p.eyePositions.empty())
                continue; // guard against division by zero below

            double mean_x = 0.0;
            double mean_y = 0.0;
            for(const auto & eyePos: p.eyePositions)
            {
                mean_x += eyePos.x;
                mean_y += eyePos.y;
            }
            mean_x /= p.eyePositions.size();
            mean_y /= p.eyePositions.size();

            eyePositions.push_back(cv::Point2d(mean_x, mean_y));
            screenPositions.push_back(p.screenPoint);
        }
    }

    return estimateParameters(eyePositions, screenPositions);
}
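A usage sketch inferred from the loops above; the element layout (a screenPoint plus a list of eyePositions per target) and the preprocessing mode are defined elsewhere in this codebase:

#include <iostream>

EyeTrackerCalibration calibrator;
CalibrationData data; // one entry per displayed target; hypothetical layout:
// entry.screenPoint  = cv::Point2d(960.0, 540.0);
// entry.eyePositions = { eye samples recorded while the target was shown };
// data.push_back(entry);
if (!calibrator.calibrate(data))
    std::cerr << "calibration rejected: empty data or fewer than 10 eye samples\n";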
Example No. 3
void SLStudio::onActionLoadCalibration(){
    QString fileName = QFileDialog::getOpenFileName(this, "Choose calibration file", QString(), "*.xml");
    if(!fileName.isEmpty()){
        CalibrationData calibration;
        calibration.load(fileName);
        // re-save the chosen file as the working calibration
        calibration.save("calibration.xml");
    }
}
Example No. 4
void CalibrationEngine::CalibrateExtrinsic(vector<cv::Point3f> objectPoints, vector<vector<cv::Point2f>> imagePoints, CalibrationData& calibrationData)
{
  cv::Mat rotationVector;
  cv::Mat translationVector;

  cv::solvePnP(objectPoints, imagePoints, calibrationData.GetIntrinsic(), calibrationData.GetDistortion(), rotationVector, translationVector);
  calibrationData.SetRotationVector(rotationVector);
}
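cv::solvePnP returns the pose as a Rodrigues rotation vector plus a translation vector; note the snippet above stores only the rotation. The standard follow-up to obtain a 3x3 rotation matrix is:

cv::Mat rotationMatrix;
cv::Rodrigues(rotationVector, rotationMatrix); // 3x1 Rodrigues vector -> 3x3 rotation matrix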
Example No. 5
void SLTrackerWorker::setup(){

    // Initialize the tracker object
    tracker = new TrackerICP();
    CalibrationData calibration;
    calibration.load("calibration.xml");
    Eigen::Matrix3f Kc;
    Kc << calibration.Kc(0,0), calibration.Kc(0,1), calibration.Kc(0,2),
          calibration.Kc(1,0), calibration.Kc(1,1), calibration.Kc(1,2),
          calibration.Kc(2,0), calibration.Kc(2,1), calibration.Kc(2,2);
    tracker->setCameraMatrix(Kc);

    QSettings settings("SLStudio");
    writeToDisk = settings.value("writeToDisk/tracking",false).toBool();
    if(writeToDisk){

        // Tracking file
        QString fileName = QDateTime::currentDateTime().toString("yyyyMMdd_HHmmss");
        fileName.append(".track");
        ofStream = new std::ofstream;
        ofStream->open(fileName.toLocal8Bit().data(), std::ofstream::out);
        (*ofStream) << "#V1.1 SLStudio Tracking File" << std::endl << "t_ms,Tx,Ty,Tz,Q0,Qx,Qy,Qz,RMS" << std::endl;

        trackingTime.start();
    }
}
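The element-wise copy of Kc above can also be written with OpenCV's Eigen interop (a sketch, assuming calibration.Kc converts to cv::Mat):

#include <opencv2/core/eigen.hpp>
// ...
cv::cv2eigen(cv::Mat(calibration.Kc), Kc); // copies the 3x3 camera matrix in one call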
Example No. 6
int main(int argc, char const ** argv)
{
  char const * keys =
    "{    h | help   | false            | show help message}"
    "{    f | file   | calibration.yaml | calibration yaml file}"
    "{    c | camera | 0                | camera number}"
    ;

  cv::CommandLineParser parser(argc, argv, keys);

  if (parser.get<bool>("help"))
  {
    parser.printParams();
    return 0;
  }
  
  int capnum = parser.get<int>("camera");
  cv::VideoCapture cap(capnum);

  CalibrationData calibData;
  calibData.load(parser.get<std::string>("file"));

  cv::Mat map1, map2;
  cv::initUndistortRectifyMap(calibData.cameraMatrix, calibData.distCoeffs, cv::Mat(), 
      cv::getOptimalNewCameraMatrix(calibData.cameraMatrix, calibData.distCoeffs, calibData.imageSize, 1, calibData.imageSize, 0),
      calibData.imageSize, CV_16SC2, map1, map2);

  cv::namedWindow("undistorted");
  while(true)
  {
    cv::Mat frame, undistFrame;
    cap >> frame;

    if(frame.size() != calibData.imageSize)
    {
      std::cerr << "Frame size does not match calibration image size" << std::endl;
      exit(-1);
    }

    cv::remap(frame, undistFrame, map1, map2, cv::INTER_LINEAR);

    cv::imshow("image", frame);
    cv::imshow("undistorted", undistFrame);

    if (cv::waitKey(10) >= 0)
      break; // exit on a key press so the trailing return is reachable
  }

  return 0;
}
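Precomputing the maps once with cv::initUndistortRectifyMap and applying them with cv::remap avoids redoing the rectification math on every frame; the one-shot equivalent inside the loop would be (sketch, slower because the maps are rebuilt internally on each call):

cv::undistort(frame, undistFrame, calibData.cameraMatrix, calibData.distCoeffs);
// the getOptimalNewCameraMatrix result computed above can be passed as an optional fifth argument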
Example No. 7
bool InputDevice::SetCalibration(const CalibrationData& data) {
	bool ok = true;
	CalibrationData::const_iterator y = data.begin();
	for (; y != data.end(); ++y) {
		const CalibrationElement& calibration = *y;
		ElementArray::iterator x = element_array_.begin();
		for (; x != element_array_.end(); ++x) {
			InputElement* _element = *x;
			if (_element->GetName() == calibration.first) {
				ok &= _element->SetCalibration(calibration.second);
			}
		}
	}
	return (ok);
}
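A usage sketch inferred from the loop above, where CalibrationData iterates over name/value CalibrationElement pairs (the value type and element construction are defined elsewhere in this codebase):

CalibrationData data;
// data.push_back(CalibrationElement("throttle", throttleValues)); // hypothetical element
bool ok = device.SetCalibration(data); // false if any matching element rejected its values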
Example No. 8
bool Hardware::setCalibrationData(Pantilt pantilt, CalibrationData calibrationData)
{
    // XY = xyOnHardware
    // XY' = xyOnScreen

#if 1 // OpenCV, N points
    int nPoints = calibrationData.length();
    std::vector<float> cvAdata(nPoints*3);  // VLAs are a compiler extension; std::vector is portable
    std::vector<float> cvACdata(nPoints*2);
    int row=0;
    foreach (const PointPair &pair, calibrationData)
    {
        // A = (X'1 Y'1 1,
        //      X'2 Y'2 1,
        //      X'3 Y'3 1)
        cvAdata[row*3+0] = pair.first.x();
        cvAdata[row*3+1] = pair.first.y();
        cvAdata[row*3+2] = 1;

        // (ax bx cx)t = 1/A * (X1 X2 X3)t
        cvACdata[row*2+0] = pair.second.x();

        // (ay by cy)t = 1/A * (Y1 Y2 Y3)t
        cvACdata[row*2+1] = pair.second.y();

        ++row;
    }
Example No. 9
void SLStudio::onActionExportCalibration() {
    CalibrationData calibration;
    calibration.load("calibration.xml");
//  Non native file dialog
//    QFileDialog saveFileDialog(this, "Export Calibration", QString(), "*.slcalib;;*.xml;;*.m");
//    saveFileDialog.setDefaultSuffix("slcalib");
//    saveFileDialog.exec();
//    QString fileName = saveFileDialog.selectedFiles().first();
//  Native file dialog
    QString selectedFilter;
    QString fileName = QFileDialog::getSaveFileName(this, "Export Calibration", QString(), "*.slcalib;;*.xml;;*.m", &selectedFilter);
    QFileInfo info(fileName);
    QString type = info.suffix();
    if(type == "")
        fileName.append(selectedFilter.remove(0,1));
    calibration.save(fileName);
}
Example No. 10
void SLPointCloudWidget::updateCalibration(){

    CalibrationData calibration;
    bool load_result = calibration.load("calibration.xml");
    if (!load_result)
        return;
    // Camera coordinate system
    visualizer->addCoordinateSystem(50, "camera", 0);

    // Projector coordinate system
    cv::Mat TransformPCV(4, 4, CV_32F, 0.0);
    cv::Mat(calibration.Rp).copyTo(TransformPCV.colRange(0, 3).rowRange(0, 3));
    cv::Mat(calibration.Tp).copyTo(TransformPCV.col(3).rowRange(0, 3));
    TransformPCV.at<float>(3, 3) = 1.0; // make it homogeneous 
    Eigen::Affine3f TransformP;
    cv::cv2eigen(TransformPCV, TransformP.matrix());

    visualizer->addCoordinateSystem(50, TransformP.inverse(), "projector", 0);
}
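The 4x4 matrix assembled above is the camera-to-projector transform in homogeneous form,

    TransformPCV = | Rp  Tp |
                   |  0   1 |

and the projector axes are drawn at its inverse, i.e. the projector pose in the camera frame (assuming Rp and Tp map camera coordinates into projector coordinates).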
Example No. 11
//----------------------------------------------------------------------------------------------------------------------
void SettingsDialog::setCalibrationData(const CalibrationData& calibrationData)
{
#ifdef TEST_ONLY
    QSettings settingsFile("tokenTest_Settings.ini", QSettings::IniFormat);
#else
    QSettings settingsFile("tokenGraver_Settings.ini", QSettings::IniFormat);
#endif
    for (int i = 0; i < calibrationData.size(); ++i) {
        settingsFile.setValue(QString("calibrationData") + QString::number(i), calibrationData[i]);
    }
}
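The matching read-back follows the same key scheme (a sketch; the element type and its QVariant conversion are assumptions):

    QSettings settingsFile("tokenGraver_Settings.ini", QSettings::IniFormat);
    CalibrationData calibrationData;
    for (int i = 0; ; ++i) {
        const QString key = QString("calibrationData") + QString::number(i);
        if (!settingsFile.contains(key))
            break;
        calibrationData.append(settingsFile.value(key).toDouble()); // hypothetical element type
    }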
Example No. 12
CalibrationData CalibrationEngine::CalibrateView(vector<cv::Point3f> objectPoints, vector<vector<cv::Point2f>> imagePoints, cv::Size viewSize)
{
  // Start with the identity and it will get refined from there
  cv::Mat distortionCoefficients = cv::Mat::zeros(5, 1, CV_64F);
  cv::Mat intrinsicMatrix = cv::Mat::eye(3, 3, CV_64F);
  vector<cv::Mat> rotationVectors;
  vector<cv::Mat> translationVectors;
  
  vector<vector<cv::Point3f>> objectPointList;
  for(size_t i = 0; i < imagePoints.size(); ++i)
  {
    objectPointList.push_back(objectPoints);
  }

  cv::calibrateCamera(objectPointList, imagePoints, viewSize, intrinsicMatrix, distortionCoefficients, rotationVectors, translationVectors, CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5);

  CalibrationData data;
  data.SetDistortion(distortionCoefficients);
  data.SetIntrinsic(intrinsicMatrix);
  
  return data;
}
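A minimal usage sketch, assuming a 9x6 chessboard with 25 mm squares and per-view corner lists collected beforehand (the engine instance and board geometry are illustrative):

vector<cv::Point3f> objectPoints;
for (int r = 0; r < 6; ++r)
  for (int c = 0; c < 9; ++c)
    objectPoints.push_back(cv::Point3f(c * 25.0f, r * 25.0f, 0.0f)); // planar board, z = 0

vector<vector<cv::Point2f>> imagePoints; // one corner list per captured view
// ... fill via cv::findChessboardCorners (+ cv::cornerSubPix) on each capture ...

CalibrationEngine engine;
CalibrationData data = engine.CalibrateView(objectPoints, imagePoints, cv::Size(1280, 1024));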
Example No. 13
void scan3d::reconstruct_model_simple(Pointcloud & pointcloud, CalibrationData const& calib, 
                                cv::Mat const& pattern_image, cv::Mat const& min_max_image, cv::Mat const& color_image,
                                cv::Size const& projector_size, int threshold, double max_dist, QWidget * parent_widget)
{
    if (!pattern_image.data || pattern_image.type()!=CV_32FC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid pattern_image\n";
        return;
    }
    if (!min_max_image.data || min_max_image.type()!=CV_8UC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid min_max_image\n";
        return;
    }
    if (color_image.data && color_image.type()!=CV_8UC3)
    {   //not standard RGB image
        std::cerr << "[reconstruct_model] ERROR invalid color_image\n";
        return;
    }
    if (!calib.is_valid())
    {   //invalid calibration
        return;
    }

    //parameters
    //const unsigned threshold = config.value("main/shadow_threshold", 70).toUInt();
    //const double   max_dist  = config.value("main/max_dist_threshold", 40).toDouble();
    //const bool     remove_background = config.value("main/remove_background", true).toBool();
    //const double   plane_dist = config.value("main/plane_dist", 100.0).toDouble();
    double plane_dist = 100.0;

    /* background removal
    cv::Point2i plane_coord[3];
    plane_coord[0] = cv::Point2i(config.value("background_plane/x1").toUInt(), config.value("background_plane/y1").toUInt());
    plane_coord[1] = cv::Point2i(config.value("background_plane/x2").toUInt(), config.value("background_plane/y2").toUInt());
    plane_coord[2] = cv::Point2i(config.value("background_plane/x3").toUInt(), config.value("background_plane/y3").toUInt());

    if (plane_coord[0].x<=0 || plane_coord[0].x>=pattern_local.cols
        || plane_coord[0].y<=0 || plane_coord[0].y>=pattern_local.rows)
    {
        plane_coord[0] = cv::Point2i(50, 50);
        config.setValue("background_plane/x1", plane_coord[0].x);
        config.setValue("background_plane/y1", plane_coord[0].y);
    }
    if (plane_coord[1].x<=0 || plane_coord[1].x>=pattern_local.cols
        || plane_coord[1].y<=0 || plane_coord[1].y>=pattern_local.rows)
    {
        plane_coord[1] = cv::Point2i(50, pattern_local.rows-50);
        config.setValue("background_plane/x2", plane_coord[1].x);
        config.setValue("background_plane/y2", plane_coord[1].y);
    }
    if (plane_coord[2].x<=0 || plane_coord[2].x>=pattern_local.cols
        || plane_coord[2].y<=0 || plane_coord[2].y>=pattern_local.rows)
    {
        plane_coord[2] = cv::Point2i(pattern_local.cols-50, 50);
        config.setValue("background_plane/x3", plane_coord[2].x);
        config.setValue("background_plane/y3", plane_coord[2].y);
    }
    */

    //init point cloud
    int scale_factor = 1;
    int out_cols = pattern_image.cols/scale_factor;
    int out_rows = pattern_image.rows/scale_factor;
    pointcloud.clear();
    pointcloud.init_points(out_rows, out_cols);
    pointcloud.init_color(out_rows, out_cols);

    //progress
    QProgressDialog * progress = NULL;
    if (parent_widget)
    {
        progress = new QProgressDialog("Reconstruction in progress.", "Abort", 0, pattern_image.rows, parent_widget, 
                                        Qt::Dialog|Qt::CustomizeWindowHint|Qt::WindowCloseButtonHint);
        progress->setWindowModality(Qt::WindowModal);
        progress->setWindowTitle("Processing");
        progress->setMinimumWidth(400);
    }

    //take 3 points in back plane
    /*cv::Mat plane;
    if (remove_background)
    {
        cv::Point3d p[3];
        for (unsigned i=0; i<3;i++)
        {
            for (unsigned j=0; 
                j<10 && (
                    INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[0])
                    || INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[1])); j++)
            {
                plane_coord[i].x += 1.f;
            }
            const cv::Vec2f & pattern = pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x);

            const float col = pattern[0];
            const float row = pattern[1];

            if (projector_size.width<=static_cast<int>(col) || projector_size.height<=static_cast<int>(row))
            {   //abort
                continue;
            }

            //shoot a ray through the image: u=\lambda*v + q
            cv::Point3d u1 = camera.to_world_coord(plane_coord[i].x, plane_coord[i].y);
            cv::Point3d v1 = camera.world_ray(plane_coord[i].x, plane_coord[i].y);

            //shoot a ray through the projector: u=\lambda*v + q
            cv::Point3d u2 = projector.to_world_coord(col, row);
            cv::Point3d v2 = projector.world_ray(col, row);

            //compute ray-ray approximate intersection
            double distance = 0.0;
            p[i] = geometry::approximate_ray_intersection(v1, u1, v2, u2, &distance);
            std::cout << "Plane point " << i << " distance " << distance << std::endl;
        }
        plane = geometry::get_plane(p[0], p[1], p[2]);
        if (cv::Mat(plane.rowRange(0,3).t()*cv::Mat(cv::Point3d(p[0].x, p[0].y, p[0].z-1.0)) + plane.at<double>(3,0)).at<double>(0,0)
                <0.0)
        {
            plane = -1.0*plane;
        }
        std::cout << "Background plane: " << plane << std::endl;
    }
    */

    cv::Mat Rt = calib.R.t();

    unsigned good = 0;
    unsigned bad  = 0;
    unsigned invalid = 0;
    unsigned repeated = 0;
    for (int h=0; h<pattern_image.rows; h+=scale_factor)
    {
        if (progress && h%4==0)
        {
            progress->setValue(h);
            progress->setLabelText(QString("Reconstruction in progress: %1 good points/%2 bad points").arg(good).arg(bad));
            QApplication::instance()->processEvents();
        }
        if (progress && progress->wasCanceled())
        {   //abort
            pointcloud.clear();
            return;
        }

        const cv::Vec2f * curr_pattern_row = pattern_image.ptr<cv::Vec2f>(h);
        const cv::Vec2b * min_max_row = min_max_image.ptr<cv::Vec2b>(h);
        for (int w=0; w<pattern_image.cols; w+=scale_factor)
        {
            double distance = max_dist;  //quality measure
            cv::Point3d p;               //reconstructed point
            //cv::Point3d normal(0.0, 0.0, 0.0);

            const cv::Vec2f & pattern = curr_pattern_row[w];
            const cv::Vec2b & min_max = min_max_row[w];

            if (sl::INVALID(pattern) || pattern[0]<0.f || pattern[1]<0.f
                || (min_max[1]-min_max[0])<static_cast<int>(threshold))
            {   //skip
                invalid++;
                continue;
            }

            const float col = pattern[0];
            const float row = pattern[1];

            if (projector_size.width<=static_cast<int>(col) || projector_size.height<=static_cast<int>(row))
            {   //abort
                continue;
            }

            cv::Vec3f & cloud_point = pointcloud.points.at<cv::Vec3f>(h/scale_factor, w/scale_factor);
            if (!sl::INVALID(cloud_point[0]))
            {   //point already reconstructed!
                repeated++;
                continue;
            }

            //standard
            cv::Point2d p1(w, h);
            cv::Point2d p2(col, row);
            triangulate_stereo(calib.cam_K, calib.cam_kc, calib.proj_K, calib.proj_kc, Rt, calib.T, p1, p2, p, &distance);

            //save texture coordinates
            /*
            normal.x = static_cast<float>(w)/static_cast<float>(color_image.cols);
            normal.y = static_cast<float>(h)/static_cast<float>(color_image.rows);
            normal.z = 0;
            */

            if (distance < max_dist)
            {   //good point

                //evaluate the plane
                double d = plane_dist+1;
                /*if (remove_background)
                {
                    d = cv::Mat(plane.rowRange(0,3).t()*cv::Mat(p) + plane.at<double>(3,0)).at<double>(0,0);
                }*/
                if (d>plane_dist)
                {   //object point, keep
                    good++;

                    cloud_point[0] = p.x;
                    cloud_point[1] = p.y;
                    cloud_point[2] = p.z;

                    //normal
                    /*cpoint.normal_x = normal.x;
                    cpoint.normal_y = normal.y;
                    cpoint.normal_z = normal.z;*/

                    if (color_image.data)
                    {
                        const cv::Vec3b & vec = color_image.at<cv::Vec3b>(h, w);
                        cv::Vec3b & cloud_color = pointcloud.colors.at<cv::Vec3b>(h/scale_factor, w/scale_factor);
                        cloud_color[0] = vec[0];
                        cloud_color[1] = vec[1];
                        cloud_color[2] = vec[2];
                    }
                }
            }
            else
            {   //skip
                bad++;
                //std::cout << " d = " << distance << std::endl;
            }
        }   //for each column
    }   //for each row

    if (progress)
    {
        progress->setValue(pattern_image.rows);
        progress->close();
        delete progress;
        progress = NULL;
    }

    std::cout << "Reconstructed points[simple]: " << good << " (" << bad << " skipped, " << invalid << " invalid) " << std::endl
                << " - repeated points: " << repeated << " (ignored) " << std::endl;
}
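Both reconstruction paths reduce to approximate ray-ray intersection (the u = lambda*v + q notation in the commented-out plane code): the camera pixel and the decoded projector pixel each define a ray,

    camera ray:    x = q1 + lambda1 * v1
    projector ray: x = q2 + lambda2 * v2

triangulate_stereo takes the reconstructed point at the rays' closest approach, and the residual gap between the rays is the distance compared against max_dist.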
Example No. 14
void scan3d::reconstruct_model_patch_center(Pointcloud & pointcloud, CalibrationData const& calib, 
                                cv::Mat const& pattern_image, cv::Mat const& min_max_image, cv::Mat const& color_image,
                                cv::Size const& projector_size, int threshold, double max_dist, QWidget * parent_widget)
{
    if (!pattern_image.data || pattern_image.type()!=CV_32FC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid pattern_image\n";
        return;
    }
    if (!min_max_image.data || min_max_image.type()!=CV_8UC2)
    {   //pattern not correctly decoded
        std::cerr << "[reconstruct_model] ERROR invalid min_max_image\n";
        return;
    }
    if (color_image.data && color_image.type()!=CV_8UC3)
    {   //not standard RGB image
        std::cerr << "[reconstruct_model] ERROR invalid color_image\n";
        return;
    }
    if (!calib.is_valid())
    {   //invalid calibration
        return;
    }

    //parameters
    //const unsigned threshold = config.value("main/shadow_threshold", 70).toUInt();
    //const double   max_dist  = config.value("main/max_dist_threshold", 40).toDouble();
    //const bool     remove_background = config.value("main/remove_background", true).toBool();
    //const double   plane_dist = config.value("main/plane_dist", 100.0).toDouble();
    double plane_dist = 100.0;

    /* background removal
    cv::Point2i plane_coord[3];
    plane_coord[0] = cv::Point2i(config.value("background_plane/x1").toUInt(), config.value("background_plane/y1").toUInt());
    plane_coord[1] = cv::Point2i(config.value("background_plane/x2").toUInt(), config.value("background_plane/y2").toUInt());
    plane_coord[2] = cv::Point2i(config.value("background_plane/x3").toUInt(), config.value("background_plane/y3").toUInt());

    if (plane_coord[0].x<=0 || plane_coord[0].x>=pattern_local.cols
        || plane_coord[0].y<=0 || plane_coord[0].y>=pattern_local.rows)
    {
        plane_coord[0] = cv::Point2i(50, 50);
        config.setValue("background_plane/x1", plane_coord[0].x);
        config.setValue("background_plane/y1", plane_coord[0].y);
    }
    if (plane_coord[1].x<=0 || plane_coord[1].x>=pattern_local.cols
        || plane_coord[1].y<=0 || plane_coord[1].y>=pattern_local.rows)
    {
        plane_coord[1] = cv::Point2i(50, pattern_local.rows-50);
        config.setValue("background_plane/x2", plane_coord[1].x);
        config.setValue("background_plane/y2", plane_coord[1].y);
    }
    if (plane_coord[2].x<=0 || plane_coord[2].x>=pattern_local.cols
        || plane_coord[2].y<=0 || plane_coord[2].y>=pattern_local.rows)
    {
        plane_coord[2] = cv::Point2i(pattern_local.cols-50, 50);
        config.setValue("background_plane/x3", plane_coord[2].x);
        config.setValue("background_plane/y3", plane_coord[2].y);
    }
    */

    //init point cloud
    //initialize the point cloud to NaN; the output size is scaled
    int scale_factor_x = 1;
    int scale_factor_y = (projector_size.width>projector_size.height ? 1 : 2); //XXX HACK: preserve regular aspect ratio XXX HACK
    int out_cols = projector_size.width/scale_factor_x;
    int out_rows = projector_size.height/scale_factor_y;
    pointcloud.clear();
    pointcloud.init_points(out_rows, out_cols); // points member initialized to NaN
    pointcloud.init_color(out_rows, out_cols);  // colors member initialized to white

    //take 3 points in back plane
    /*cv::Mat plane;
    if (remove_background)
    {
        cv::Point3d p[3];
        for (unsigned i=0; i<3;i++)
        {
            for (unsigned j=0; 
                j<10 && (
                    INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[0])
                    || INVALID(pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x)[1])); j++)
            {
                plane_coord[i].x += 1.f;
            }
            const cv::Vec2f & pattern = pattern_local.at<cv::Vec2f>(plane_coord[i].y, plane_coord[i].x);

            const float col = pattern[0];
            const float row = pattern[1];

            if (projector_size.width<=static_cast<int>(col) || projector_size.height<=static_cast<int>(row))
            {   //abort
                continue;
            }

            //shoot a ray through the image: u=\lambda*v + q
            cv::Point3d u1 = camera.to_world_coord(plane_coord[i].x, plane_coord[i].y);
            cv::Point3d v1 = camera.world_ray(plane_coord[i].x, plane_coord[i].y);

            //shoot a ray through the projector: u=\lambda*v + q
            cv::Point3d u2 = projector.to_world_coord(col, row);
            cv::Point3d v2 = projector.world_ray(col, row);

            //compute ray-ray approximate intersection
            double distance = 0.0;
            p[i] = geometry::approximate_ray_intersection(v1, u1, v2, u2, &distance);
            std::cout << "Plane point " << i << " distance " << distance << std::endl;
        }
        plane = geometry::get_plane(p[0], p[1], p[2]);
        if (cv::Mat(plane.rowRange(0,3).t()*cv::Mat(cv::Point3d(p[0].x, p[0].y, p[0].z-1.0)) + plane.at<double>(3,0)).at<double>(0,0)
                <0.0)
        {
            plane = -1.0*plane;
        }
        std::cout << "Background plane: " << plane << std::endl;
    }
    */

    //candidate points
    QMap<unsigned, cv::Point2f> proj_points;
    QMap<unsigned, std::vector<cv::Point2f> > cam_points;

    //cv::Mat proj_image = cv::Mat::zeros(out_rows, out_cols, CV_8UC3);

    unsigned good = 0;
    unsigned bad  = 0;
    unsigned invalid = 0;
    unsigned repeated = 0;
    for (int h=0; h<pattern_image.rows; h++)
    {
        const cv::Vec2f * curr_pattern_row = pattern_image.ptr<cv::Vec2f>(h);
        const cv::Vec2b * min_max_row = min_max_image.ptr<cv::Vec2b>(h);
        for (int w=0; w<pattern_image.cols; w++)
        {
            const cv::Vec2f & pattern = curr_pattern_row[w];
            const cv::Vec2b & min_max = min_max_row[w];

            if (sl::INVALID(pattern) 
                || pattern[0]<0.f || pattern[0]>=projector_size.width || pattern[1]<0.f || pattern[1]>=projector_size.height
                || (min_max[1]-min_max[0])<static_cast<int>(threshold))
            {   //skip
                invalid++;
                continue;
            }

            //ok
            cv::Point2f proj_point(pattern[0]/scale_factor_x, pattern[1]/scale_factor_y);

            unsigned index = static_cast<unsigned>(proj_point.y)*out_cols + static_cast<unsigned>(proj_point.x); // index of the point in the projector image (uncertain)

            proj_points.insert(index, proj_point);

             //std::cout<<"index:  "<<index<<std::endl;
            cam_points[index].push_back(cv::Point2f(w, h));

            //std::cout<<"cam_point:  "<<cam_points[index]<<std::endl;

            //proj_image.at<cv::Vec3b>(static_cast<unsigned>(proj_point.y), static_cast<unsigned>(proj_point.x)) = color_image.at<cv::Vec3b>(h, w);
        }

    }

    //cv::imwrite("proj_image.png", proj_image);

    cv::Mat Rt = calib.R.t();
    QMapIterator<unsigned, cv::Point2f> iter1(proj_points);
    unsigned n = 0;
    while (iter1.hasNext()) 
    {
        n++;
        iter1.next();
        unsigned index = iter1.key();
        const cv::Point2f & proj_point = iter1.value();
        const std::vector<cv::Point2f> & cam_point_list = cam_points.value(index);
        const unsigned count = static_cast<int>(cam_point_list.size());

        if (!count)
        {   //empty list
            continue;
        }

        //center average
        cv::Point2d sum(0.0, 0.0), sum2(0.0, 0.0); // sum2 accumulates squares (for a variance check) but is unused below
        for (std::vector<cv::Point2f>::const_iterator iter2=cam_point_list.begin(); iter2!=cam_point_list.end(); iter2++)
        {
            sum.x += iter2->x;
            sum.y += iter2->y;
            sum2.x += (iter2->x)*(iter2->x);
            sum2.y += (iter2->y)*(iter2->y);
        }
        cv::Point2d cam(sum.x/count, sum.y/count);
        cv::Point2d proj(proj_point.x*scale_factor_x, proj_point.y*scale_factor_y);

        //triangulate
        double distance = max_dist;  //quality measure
        cv::Point3d p;          //reconstructed point
        triangulate_stereo(calib.cam_K, calib.cam_kc, calib.proj_K, calib.proj_kc, Rt, calib.T, cam, proj, p, &distance);

        if (distance < max_dist)
        {   //good point

            //evaluate the plane
            double d = plane_dist+1;
            /*if (remove_background)
            {
                d = cv::Mat(plane.rowRange(0,3).t()*cv::Mat(p) + plane.at<double>(3,0)).at<double>(0,0);
            }*/
            if (d>plane_dist)
            {   //object point, keep
                good++;

                cv::Vec3f & cloud_point = pointcloud.points.at<cv::Vec3f>(proj_point.y, proj_point.x);
                cloud_point[0] = p.x;
                cloud_point[1] = p.y;
                cloud_point[2] = p.z;
                //std::cout << " pointcloud.points= " <<cloud_point << std::endl;

                if (color_image.data) // store the colour value for each reconstructed pixel
                {
                    const cv::Vec3b & vec = color_image.at<cv::Vec3b>(static_cast<unsigned>(cam.y), static_cast<unsigned>(cam.x));
                    cv::Vec3b & cloud_color = pointcloud.colors.at<cv::Vec3b>(proj_point.y, proj_point.x);
                    cloud_color[0] = vec[0];
                    cloud_color[1] = vec[1];
                    cloud_color[2] = vec[2];
                }
            }

        }
        else
        {   //skip
            bad++;
            //std::cout << " d = " << distance << std::endl;
        }
    }   //while

    std::cout << "Reconstructed points [patch center]: " << good << " (" << bad << " skipped, " << invalid << " invalid) " << std::endl
                << " - repeated points: " << repeated << " (ignored) " << std::endl;
}