void TemplateMatchCandidates::weakClassifiersForTemplate(
        const cv::Mat &templ, 
        const cv::Mat &templMask,
        const std::vector< cv::Rect > &rects, 
        cv::Mat_<int> &classifiers, 
        cv::Scalar &mean)
    {
        const int nChannels = templ.channels();
        classifiers.create(nChannels, (int)rects.size());

        // Note: we use cv::mean here so the template mask is honoured.
        mean = cv::mean(templ, templMask);

        for (int x = 0; x < (int)rects.size(); ++x) {
            cv::Scalar blockMean = cv::mean(templ(rects[x]), templMask.empty() ? cv::noArray() : templMask(rects[x]));
            
            for (int y = 0; y < nChannels; ++y) {
                classifiers(y, x) = blockMean[y] > mean[y] ? 1 : -1;
            }            
        }
    }
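A minimal usage sketch (not from the original source): it builds a hypothetical 2x2 block grid over a template and prints the per-channel +1/-1 weak-classifier signs, mirroring the loop above. The real class supplies its own rectangles and mask.

#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    cv::Mat templ = cv::imread("template.png");   // any small BGR patch (assumed file)
    if (templ.empty()) return 1;

    // assumption: a simple 2x2 grid of equally sized blocks
    std::vector<cv::Rect> rects;
    int bw = templ.cols / 2, bh = templ.rows / 2;
    for (int r = 0; r < 2; ++r)
        for (int c = 0; c < 2; ++c)
            rects.push_back(cv::Rect(c * bw, r * bh, bw, bh));

    cv::Scalar mean = cv::mean(templ);            // no mask in this sketch
    for (size_t i = 0; i < rects.size(); ++i) {
        cv::Scalar blockMean = cv::mean(templ(rects[i]));
        for (int ch = 0; ch < templ.channels(); ++ch)
            std::cout << (blockMean[ch] > mean[ch] ? 1 : -1) << ' ';
        std::cout << '\n';
    }
    return 0;
}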
Example #2
void cameraToWorld(const cv::Mat &R_in, const cv::Mat &T_in, const cv::Mat & in_points_ori,
                   cv::Mat &points_out) {
  cv::Mat_<float> R, T, in_points;
  R_in.convertTo(R, CV_32F);
  T_in.reshape(1, 1).convertTo(T, CV_32F);
  if (!in_points_ori.empty())
  {
    in_points_ori.reshape(1, in_points_ori.size().area()).convertTo(in_points, CV_32F);
    cv::Mat_<float> T_repeat;
    cv::repeat(T, in_points.rows, 1, T_repeat);

    // Apply the inverse translation/rotation
    cv::Mat points = (in_points - T_repeat) * R;
    // Reshape to the original size
    points_out = points.reshape(3, in_points_ori.rows);
  }
  else
  {
    points_out = cv::Mat();
  }
}
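A short usage sketch under assumed conventions: the function inverts p_cam = p_world * R^T + T for row-vector points, so world coordinates come back as (p_cam - T) * R. The values below are illustrative only.

cv::Mat R = cv::Mat::eye(3, 3, CV_32F);                 // no rotation
cv::Mat T = (cv::Mat_<float>(1, 3) << 0.f, 0.f, 1.f);   // camera 1 m along z
cv::Mat pts = (cv::Mat_<cv::Vec3f>(2, 1) << cv::Vec3f(0, 0, 1),
                                            cv::Vec3f(1, 0, 2));
cv::Mat world;
cameraToWorld(R, T, pts, world);                        // world: 2x1, CV_32FC3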
    //this public function performs the role of
    //main() in the original file
vector<Point> CVSquares::detectedSquareInImage (cv::Mat image, float tol, int threshold, int levels, int acc)
{
    vector<vector<Point> > squares;
    vector<Point> biggest_square;

    if( image.empty() )
    {
        cout << "CVSquares.m: Couldn't load image" << endl;
        return biggest_square;   // nothing to detect in an empty image
    }

    tolerance = tol;
    thresh = threshold;
    N = levels;
    accuracy = acc;
    findSquares(image, squares);
    find_largest_square(squares, biggest_square);
//    drawSquares(image, squares);
    drawSquare(image, biggest_square);

    return biggest_square;
}
bool FeatureAlgorithm::extractFeatures(const cv::Mat& image, Keypoints& kp, Descriptors& desc) const
{
    assert(!image.empty());

    if (featureEngine)
    {
        (*featureEngine)(image, cv::noArray(), kp, desc);
    }
    else
    {
        detector->detect(image, kp);

        if (kp.empty())
            return false;

        extractor->compute(image, kp, desc);
    }

    return !kp.empty();
}
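A hedged wiring sketch (OpenCV 2.x-style API, to match the functor call above); the FeatureAlgorithm construction itself is not shown in the source, so this only demonstrates the call shape the featureEngine branch relies on.

cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
cv::ORB orb;                        // detector and extractor in one object
std::vector<cv::KeyPoint> kp;
cv::Mat desc;
orb(img, cv::noArray(), kp, desc);  // same signature as (*featureEngine)(...)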
Example #5
	bool Fern::Train(const vector<cv::Mat>& samp_imgs, const cv::Mat& labels, const FernTrainingParams& params)
	{
		if(samp_imgs.empty() || labels.empty() || (int)samp_imgs.size() != labels.rows)
		{
			std::cerr<<"Invalid training data."<<std::endl;
			return false;
		}

		// init data
		double maxlabel, minlabel;
		cv::minMaxLoc(labels, &minlabel, &maxlabel);
		class_likelihood.resize((int)maxlabel+1);
		int max_feat_id = (int)std::pow(2.f, params.num_features);
		for(size_t i=0; i<class_likelihood.size(); i++)
		{
			class_likelihood[i] = NodeStatisticsHist(max_feat_id);
		}

		// generate random features
		features.resize(params.num_features);

		// compute node statistics for each feature evaluation
		for(size_t id=0; id<samp_imgs.size(); id++)
		{
			int feat_val = 0;
			for(size_t feat_id=0; feat_id<features.size(); feat_id++)
			{
				double response = features[feat_id].GetFeatureResponse(samp_imgs[id]);
				// shift one bit per feature; set the low bit on a positive response
				feat_val <<= 1;
				if(response > 0)
					feat_val |= 1;
			}

			// add to hist
			class_likelihood[labels.at<int>(id)].AddSample(feat_val, 1);
		}

		std::cout<<"Finished training fern."<<std::endl;

		m_ifTrained = true;
		return true;
	}
bool VirtualKinect::generatePointClouds(const cv::Mat& depth, cv::Mat& points)
{
    if( depth.empty() || (depth.type() != CV_16UC1 && depth.type() != CV_16SC1) )
        return false;

    points.create(depth.size(), CV_32FC3);

    int cols = depth.cols, rows = depth.rows;
    float co = (cols-1)/2.0f, ro = (rows-1)/2.0f;
    float scaleFactor = 0.0021f;
    int minDistance = -10;
    float unitFactor = 0.001f; // unit: meter

    for(int r=0; r<rows; r++)
    {
        cv::Point3f* cloud_ptr = (cv::Point3f*)points.ptr(r);
        const ushort* depth_ptr = (const ushort*)depth.ptr(r);
        for(int c=0; c<cols; c++)
        {

            if (depth_ptr[c] > 0)
            {
                float z = depth_ptr[c]; 
                cv::Point3f src(c,r,z);
                //mapProjectiveToRealWorld(src, cloud_ptr[c]);
                cloud_ptr[c].x = unitFactor * (c - co) * (z + minDistance) * scaleFactor;
                cloud_ptr[c].y = unitFactor * (ro - r) * (z + minDistance) * scaleFactor;
                cloud_ptr[c].z = unitFactor * z;
            } 
            else
            {
                cloud_ptr[c].x = 0;
                cloud_ptr[c].y = 0;
                cloud_ptr[c].z = 0;
            }
        }
    }

    return true;
}
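A hedged usage sketch: run a 16-bit depth map through the converter and read one 3-D point back. The instance, file name and depth units are assumptions.

VirtualKinect vk;                                             // hypothetical instance
cv::Mat depth = cv::imread("depth.png", cv::IMREAD_ANYDEPTH); // CV_16UC1 expected
cv::Mat cloud;
if (vk.generatePointClouds(depth, cloud)) {
    cv::Point3f p = cloud.at<cv::Point3f>(depth.rows / 2, depth.cols / 2);
    std::cout << "centre point (m): " << p << std::endl;
}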
Example #7
	// Metric constructor + 2d depth
SensorData::SensorData(const cv::Mat & laserScan,
		  const cv::Mat & image,
		  const cv::Mat & depthOrRightImage,
		  float fx,
		  float fyOrBaseline,
		  float cx,
		  float cy,
		  const Transform & localTransform,
		  const Transform & pose,
		  float poseRotVariance,
		  float poseTransVariance,
		  int id,
		  double stamp,
		  const std::vector<unsigned char> & userData) :
	_image(image),
	_id(id),
	_stamp(stamp),
	_depthOrRightImage(depthOrRightImage),
	_laserScan(laserScan),
	_fx(fx),
	_fyOrBaseline(fyOrBaseline),
	_cx(cx),
	_cy(cy),
	_pose(pose),
	_localTransform(localTransform),
	_poseRotVariance(poseRotVariance),
	_poseTransVariance(poseTransVariance),
	_userData(userData)
{
	UASSERT(_laserScan.empty() || _laserScan.type() == CV_32FC2);
	UASSERT(image.type() == CV_8UC1 || // Mono
			image.type() == CV_8UC3);  // RGB
	UASSERT(depthOrRightImage.type() == CV_32FC1 || // Depth in meter
			depthOrRightImage.type() == CV_16UC1 || // Depth in millimetre
			depthOrRightImage.type() == CV_8U);     // Right stereo image
	UASSERT(!depthOrRightImage.empty());
	UASSERT(!_localTransform.isNull());
	UASSERT_MSG(uIsFinite(_poseRotVariance) && _poseRotVariance>0 && uIsFinite(_poseTransVariance) && _poseTransVariance>0, "Rotational and translational variances should not be null! (set to 1 if unknown)");
}
bool VirtualKinectCapture::retrieve(cv::Mat& frame, int flag)
{
    if ((flag == VK_CAP_TYPE_DEPTH || flag == VK_CAP_TYPE_POINT) && !depth_.empty())
    {
        if (flag == VK_CAP_TYPE_POINT)
        {
            generatePointClouds(depth_, frame);
        }
        else
        {
            depth_.copyTo(frame);
        }
    }
    else if ((flag == VK_CAP_TYPE_IMAGE) && !image_.empty())
    {
        image_.copyTo(frame);
    }
    else
        frame.release();

    return !frame.empty();
}
Example #9
void ObjWidget::setData(
        const cv::Mat & image)
{
    rectItems_.clear();
    graphicsView_->scene()->clear();
    graphicsViewInitialized_ = false;

    cvImage_ = image.clone();
    pixmap_ = QPixmap::fromImage(cvtCvMat2QImage(cvImage_));
    //this->setMinimumSize(image_.size());

    if(graphicsViewMode_->isChecked())
    {
        this->setupGraphicsView();
    }
    label_->setVisible(image.empty());
}
Example #10
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
    if (action == GLFW_PRESS) {
        if (key == GLFW_KEY_W) {
            char filename[] = "out.bmp";
            SOIL_save_screenshot(filename, SOIL_SAVE_TYPE_BMP, 0, 0, WIDTH, HEIGHT);
        }
#ifdef STATIC_IMAGES
        // N is handled inside the PRESS branch so it only fires on key press
        else if (key == GLFW_KEY_N) {
            // Capture the next frame from the camera stream
            stream1 >> cameraFrame;
            if (cameraFrame.empty()) {
                exit(0);
            }
            updated = true;
        }
#endif
    }
}
Example #11
void on_mouse(int event, int x, int y, int flags, void *){
  if (img.empty()){
    return ;
  }

  if (event == CV_EVENT_LBUTTONDOWN){
    first_pt = cv::Point(x,y);//set the start point
  }
  else if ((event == CV_EVENT_MOUSEMOVE) && (flags & CV_EVENT_FLAG_LBUTTON)){
    second_pt = cv::Point(x,y);
    img = img0.clone();
    //draw a rectangle
    cv::rectangle(img,first_pt,second_pt,cv::Scalar(255,0,0),1,8,0);

    //cv::Mat img_hdr = img;
    cv::imshow("image",img);
  }
  else if ((event == CV_EVENT_LBUTTONUP)){
    inpaint_mask.create(img0.size(),CV_8UC1);
    inpaint_mask.setTo(0);   // create() leaves the buffer uninitialized
    cv::rectangle(inpaint_mask, first_pt, second_pt, cv::Scalar(255),-1,8,0);
  }
}
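A hedged wiring sketch for the handler above, assuming the same globals (img, img0, inpaint_mask): register the callback, then inpaint whatever region the user marked. Window and file names are illustrative.

int main(int argc, char** argv) {
    img0 = cv::imread(argc > 1 ? argv[1] : "damaged.png");
    img = img0.clone();
    cv::namedWindow("image");
    cv::setMouseCallback("image", on_mouse, 0);
    cv::imshow("image", img);
    while (cv::waitKey(30) != 27) {                  // Esc quits
        if (!inpaint_mask.empty()) {
            cv::Mat repaired;
            cv::inpaint(img0, inpaint_mask, repaired, 3, cv::INPAINT_TELEA);
            cv::imshow("inpainted", repaired);
        }
    }
    return 0;
}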
void ImageNodelet::imageCb(const sensor_msgs::ImageConstPtr& msg)
{
  image_mutex_.lock();

  // We want to scale floating point images so that they display nicely
  bool do_dynamic_scaling = (msg->encoding.find("F") != std::string::npos);

  // Convert to OpenCV native BGR color
  try {
    last_image_ = cvtColorForDisplay(cv_bridge::toCvShare(msg), "", do_dynamic_scaling)->image;
  }
  catch (cv_bridge::Exception& e) {
    NODELET_ERROR_THROTTLE(30, "Unable to convert '%s' image for display: '%s'",
                             msg->encoding.c_str(), e.what());
  }

  // Must release the mutex before calling cv::imshow, or can deadlock against
  // OpenCV's window mutex.
  image_mutex_.unlock();
  if (!last_image_.empty())
    cv::imshow(window_name_, last_image_);
}
void MixtureOfGaussianV2BGS::process(const cv::Mat &img_input, cv::Mat &img_output)
{
  if(img_input.empty())
    return;

  loadConfig();

  if(firstTime)
    saveConfig();

  //------------------------------------------------------------------
  // BackgroundSubtractorMOG2
  // http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
  //
  // Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
  //
  // The class implements the Gaussian mixture model background subtraction described in:
  //  (1) Z.Zivkovic, Improved adaptive Gaussian mixture model for background subtraction, International Conference Pattern Recognition, UK, August, 2004,
  //  The code is very fast and also performs shadow detection. The number of Gaussian components is adapted per pixel.
  //
  //  (2) Z.Zivkovic, F. van der Heijden, Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction,
  //  Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006.
  //  The algorithm is similar to the standard Stauffer&Grimson algorithm, with additional selection of the number of Gaussian components based on:
  //    Z.Zivkovic, F.van der Heijden, Recursive unsupervised learning of finite mixture models, IEEE Trans. on Pattern Analysis and Machine Intelligence,
  //    vol.26, no.5, pages 651-656, 2004.
  //------------------------------------------------------------------

  mog(img_input, img_foreground, alpha);

  if(enableThreshold)
    cv::threshold(img_foreground, img_foreground, threshold, 255, cv::THRESH_BINARY);

  if(showOutput)
    cv::imshow("Gaussian Mixture Model (Zivkovic&Heijden)", img_foreground);

  img_foreground.copyTo(img_output);

  firstTime = false;
}
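A minimal sketch of the underlying OpenCV 2.x class the wrapper drives, using the same functor-style call; the camera index, learning rate and shadow-removal threshold are assumptions.

cv::BackgroundSubtractorMOG2 mog;
cv::VideoCapture cap(0);
cv::Mat frame, fg;
while (cap.read(frame)) {
    mog(frame, fg, 0.01);                                // learningRate, cf. alpha above
    cv::threshold(fg, fg, 127, 255, cv::THRESH_BINARY);  // drop the shadow label (127)
    cv::imshow("foreground", fg);
    if (cv::waitKey(30) >= 0) break;
}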
cv::Mat tp3::convo(const cv::Mat& oImage, const cv::Mat_<float>& oKernel) {
    CV_Assert(!oImage.empty() && oImage.type()==CV_8UC3 && oImage.isContinuous());
    CV_Assert(!oKernel.empty() && oKernel.cols==oKernel.rows && oKernel.isContinuous());
    CV_Assert(oImage.cols>oKernel.cols && oImage.rows>oKernel.rows);
    cv::Mat oResult(oImage.size(),CV_32FC3);

    for (int row_index = 0; row_index < oImage.rows; ++row_index)
    {
        for (int col_index = 0; col_index < oImage.cols; ++col_index)
        {
            // convolve each channel separately and rescale to [0,1]
            float resultBlue = calculate_convolution(oImage, oKernel, blue, row_index, col_index) / 255;
            float resultGreen = calculate_convolution(oImage, oKernel, green, row_index, col_index) / 255;
            float resultRed = calculate_convolution(oImage, oKernel, red, row_index, col_index) / 255;

            oResult.at<cv::Vec3f>(row_index, col_index) = cv::Vec3f(resultBlue, resultGreen, resultRed);
        }
    }

    return oResult;
}
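A hedged cross-check sketch: cv::filter2D computes correlation, so flipping the kernel on both axes reproduces a true convolution like convo() above (border handling may still differ). The kernel is an arbitrary example.

cv::Mat img = cv::imread("input.png");              // CV_8UC3
cv::Mat_<float> kernel = (cv::Mat_<float>(3, 3) <<
     0, -1,  0,
    -1,  5, -1,
     0, -1,  0);                                    // sharpen kernel
cv::Mat flipped, reference;
cv::flip(kernel, flipped, -1);                      // flip around both axes
cv::filter2D(img, reference, CV_32F, flipped);
reference /= 255.0;                                 // match convo()'s scaling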
Example #15
void Drawing::drawReferenceSystem(cv::Mat &image, const cv::Mat &cRo,
  const cv::Mat &cto, const cv::Mat &A, const cv::Mat &K,
  float length)
{
  cv::Mat k;
  if(K.empty())
    k = cv::Mat::zeros(4,1, cRo.type());
  else
    k = K;

  std::vector<cv::Point3f> oP;
  oP.push_back(cv::Point3f(0,0,0));
  oP.push_back(cv::Point3f(length,0,0));
  oP.push_back(cv::Point3f(0,length,0));
  oP.push_back(cv::Point3f(0,0,length));

  vector<cv::Point2f> points2d;
  cv::projectPoints(cv::Mat(oP), cRo, cto, A, k, points2d);
  
  // draw axis
  cv::Scalar bluez, greeny, redx;

  if(image.channels() == 3 )
  {
    bluez = cv::Scalar(255,0,0);
    greeny = cv::Scalar(0,255,0);
    redx = cv::Scalar(0,0,255);
  }
  else
  {
    bluez = cv::Scalar(18,18,18);
    greeny = cv::Scalar(182,182,182);
    redx = cv::Scalar(120,120,120);
  }

  cv::line(image, points2d[0], points2d[1], redx, 2);
  cv::line(image, points2d[0], points2d[2], greeny, 2);
  cv::line(image, points2d[0], points2d[3], bluez, 2);
}
Example #16
void lv::BinClassif::accumulate(const cv::Mat& oClassif, const cv::Mat& oGT, const cv::Mat& oROI) {
    lvAssert_(!oClassif.empty() && oClassif.type()==CV_8UC1,"binary classifier results must be non-empty and of type 8UC1");
    lvAssert_(oGT.empty() || oGT.type()==CV_8UC1,"gt mat must be empty, or of type 8UC1");
    lvAssert_(oROI.empty() || oROI.type()==CV_8UC1,"ROI mat must be empty, or of type 8UC1");
    lvAssert_((oGT.empty() || oClassif.size()==oGT.size()) && (oROI.empty() || oClassif.size()==oROI.size()),"all input mat sizes must match");
    if(oGT.empty()) {
        nDC += oClassif.size().area();
        return;
    }
    const size_t step_row = oClassif.step.p[0];
    for(size_t i = 0; i<(size_t)oClassif.rows; ++i) {
        const size_t idx_nstep = step_row*i;
        const uchar* input_step_ptr = oClassif.data+idx_nstep;
        const uchar* gt_step_ptr = oGT.data+idx_nstep;
        const uchar* roi_step_ptr = oROI.data+idx_nstep;
        for(int j = 0; j<oClassif.cols; ++j) {
            if(gt_step_ptr[j]!=DATASETUTILS_OUTOFSCOPE_VAL &&
               gt_step_ptr[j]!=DATASETUTILS_UNKNOWN_VAL &&
               (oROI.empty() || roi_step_ptr[j]!=DATASETUTILS_NEGATIVE_VAL)) {
                if(input_step_ptr[j]==DATASETUTILS_POSITIVE_VAL) {
                    if(gt_step_ptr[j]==DATASETUTILS_POSITIVE_VAL)
                        ++nTP;
                    else // gt_step_ptr[j]==s_nSegmNegative
                        ++nFP;
                }
                else { // input_step_ptr[j]==s_nSegmNegative
                    if(gt_step_ptr[j]==DATASETUTILS_POSITIVE_VAL)
                        ++nFN;
                    else // gt_step_ptr[j]==s_nSegmNegative
                        ++nTN;
                }
                if(gt_step_ptr[j]==DATASETUTILS_SHADOW_VAL) {
                    if(input_step_ptr[j]==DATASETUTILS_POSITIVE_VAL)
                        ++nSE;
                }
            }
            else
                ++nDC;
        }
    }
}
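A hedged follow-up sketch: the usual scores recovered from the counters the loop above accumulates (nTP, nFP, nFN are the same fields; placement inside a member function is assumed).

double tp = (double)nTP, fp = (double)nFP, fn = (double)nFN;
double precision = tp / std::max(tp + fp, 1.0);   // guard empty accumulators
double recall    = tp / std::max(tp + fn, 1.0);
double f1 = 2.0 * precision * recall / std::max(precision + recall, 1e-12);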
Example #17
bool CopyCvMat2RawImageBuf(const cv::Mat & srcImg, unsigned char * destImg, unsigned int destChannel)
{
	if ( srcImg.empty() ) { return false; }
	// if ( srcImg.channels() <= destChannel ) { return false; }

	int srcChannel = srcImg.channels(); 

	unsigned char * srcRowStart = srcImg.data;
	unsigned char * srcDataPtr = NULL;
	int minC = (srcChannel < (int)destChannel) ? srcChannel : (int)destChannel;

	for (int y = 0; y < srcImg.rows; ++y, srcRowStart += srcImg.step)
	{
		srcDataPtr = srcRowStart;

		for (int x = 0; x < srcImg.cols; ++x, srcDataPtr += srcChannel, destImg += destChannel)
		{
			for (int k = 0; k < minC; ++k)
			{
				destImg[k] = srcDataPtr[k];
			}
		}
	}

	return true;
}
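A hedged usage sketch: copy a 3-channel BGR Mat into a caller-owned 4-byte-per-pixel buffer; with destChannel = 4, the fourth byte of each pixel is left at its initial value.

cv::Mat bgr = cv::imread("frame.png");                        // CV_8UC3 (assumed file)
std::vector<unsigned char> buf(bgr.rows * bgr.cols * 4, 255); // opaque alpha
CopyCvMat2RawImageBuf(bgr, buf.data(), 4);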
PinholeCamera::PinholeCamera(const cv::Mat &_cameraMatrix, const cv::Mat &_distCoeffs, const PoseRT &_extrinsics, const cv::Size &_imageSize)
{
  if (_cameraMatrix.type() == CV_64FC1)
  {
    cameraMatrix = _cameraMatrix;
  }
  else
  {
    _cameraMatrix.convertTo(cameraMatrix, CV_64FC1);
  }

  if (_distCoeffs.empty())
  {
    const int distCoeffsCount = 5;
    distCoeffs = Mat::zeros(distCoeffsCount, 1, CV_32FC1);
  }
  else
  {
    distCoeffs = _distCoeffs;
  }
  extrinsics = _extrinsics;
  imageSize = _imageSize;
}
void makeHisto1D( const cv::Mat& src, vector<double>& hist, int binCt, const cv::Mat& msk )
{
    ASSERT_MSG( src.type() == CV_8UC1, "This method can only use 8 bit single channel images" );
    ASSERT_MSG( msk.empty() || msk.type() == CV_8UC1, "Only an 8 bit single channel mask may be used" );

    hist = vector<double>( binCt, 0.0 );
    int N = 0;

    for( int i=0; i<src.rows; i++ )
    {
        for( int j=0; j<src.cols; j++ )
        {
            if( !msk.empty() && msk.at<uchar>(i,j) == 0 )
                continue;
            hist[ src.at<uchar>( i, j ) * binCt / 256 ]++;
            N++;
        }
    }
    for( int i=0; i<binCt; i++ )
    {
        hist[i] /= (N > 0 ? N : 1);   // avoid 0/0 when the mask rejects everything
    }
}
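A hedged usage sketch: a normalized 32-bin histogram of a grayscale image, with its Shannon entropy as a quick sanity check (file name assumed).

cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
std::vector<double> hist;
makeHisto1D(gray, hist, 32, cv::Mat());             // empty Mat = no mask
double entropy = 0.0;
for (size_t i = 0; i < hist.size(); ++i)
    if (hist[i] > 0) entropy -= hist[i] * std::log2(hist[i]);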
Example #20
  void Viewer::display(cv::Mat imageRGB)
  {
	  if (!imageRGB.empty()){
		  Glib::RefPtr<Gdk::Pixbuf> imgBuff =
				  Gdk::Pixbuf::create_from_data((const guint8*) imageRGB.data,Gdk::COLORSPACE_RGB,false,8,imageRGB.cols,imageRGB.rows,imageRGB.step);

		  gtkimage->clear();
		  gtkimage->set(imgBuff);
	  }
	  mainwindow->resize(1,1);
	  while (gtkmain.events_pending())
		  gtkmain.iteration();

	  /*
    colorspaces::ImageRGB8 img_rgb8(image);//conversion will happen if needed
    Glib::RefPtr<Gdk::Pixbuf> imgBuff = 
      Gdk::Pixbuf::create_from_data((const guint8*)img_rgb8.data,
				    Gdk::COLORSPACE_RGB,
				    false,
				    8,
				    img_rgb8.width,
				    img_rgb8.height,
				    img_rgb8.step);
    
    gtkimage->clear();
    gtkimage->set(imgBuff);
    displayFrameRate();
    mainwindow->resize(1,1);
    while (gtkmain.events_pending())
      gtkmain.iteration();

      */
  }
std::vector<cv::Point2f> EyeFinder::findInImage(cv::Mat img) {
	
	std::vector<cv::Rect> eyesRect;
	std::vector<cv::Point2f> ret;
	
	ret.clear();
	eyesRect.clear();

	if(!success) {
		std::cout << "Wrong cascade classifier filename" << std::endl;
	} else {
		if(!img.empty()) {

			cv::Mat gray;
			img.copyTo(gray);
			//cv::cvtColor(img, gray, CV_BGR2GRAY);
			cv::equalizeHist(gray, gray);

			haarClassifier.detectMultiScale(gray, eyesRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE);

			if(eyesRect.size() == 2) {
				for(int i = 0; i < eyesRect.size(); i++) {
					cv::Rect eyeROI = eyesRect.at(i);
					float centerX = eyeROI.x + (eyeROI.width/2.0f);
					float centerY = eyeROI.y + (eyeROI.height/2.0f);
					cv::Point2f point(centerX, centerY);
					ret.push_back(point);
				}
			} else {
				std::cout << "Cannot find eyes in image" << std::endl;
			}
		}
	}

	return ret;

}
cv::Mat PointCloudImageCreator::interpolateImage(
    const cv::Mat &image, const cv::Mat &mask) {
    if (image.empty()) {
       return image;
    }
    cv::Mat iimg = image.clone();
    cv::Mat mop_imgg;
    cv::Mat mop_img;
    this->cvMorphologicalOperations(image, mop_imgg, false);
    this->cvMorphologicalOperations(mop_imgg, mop_img, false);
    
    const int neighbour = 1;
    for (int j = neighbour; j < mask.rows - neighbour; j++) {
       for (int i = neighbour; i < mask.cols - neighbour; i++) {
          if (mask.at<float>(j, i) == 0) {
             int p0 = 0;
             int p1 = 0;
             int p2 = 0;
             int icnt = 0;
             for (int y = -neighbour; y < neighbour + 1; y++) {
                for (int x = -neighbour; x < neighbour + 1; x++) {
                   if (x != 0 || y != 0) {   // skip the centre pixel itself
                      p0 += mop_img.at<cv::Vec3b>(j + y, i + x)[0];
                      p1 += mop_img.at<cv::Vec3b>(j + y, i + x)[1];
                      p2 += mop_img.at<cv::Vec3b>(j + y, i + x)[2];
                      icnt++;
                   }
                }
             }
             iimg.at<cv::Vec3b>(j, i)[0] = p0 / icnt;
             iimg.at<cv::Vec3b>(j, i)[1] = p1 / icnt;
             iimg.at<cv::Vec3b>(j, i)[2] = p2 / icnt;
          }
       }
    }
    return iimg;
}
Example #23
void ArucoThread::imagesSending( ArucoCore& aCore, const cv::Mat frame ) const
{
	if ( mSendBackgrImgEnabled && !frame.empty() ) {
		if ( ! mMarkerIsBehind ) {
			cv::flip( frame, frame, 1 );
		}
		cv::cvtColor( frame, frame, CV_BGR2RGB );

		emit pushBackgrImage( frame.clone() );
	}

	cv::Mat image;
	if ( mMultiMarkerEnabled ) {
		image = aCore.getDetectedRectangleImage();
	}
	else {
		image = aCore.getDetImage();
	}

	if ( mSendImgEnabled ) {
		if ( ! mMarkerIsBehind ) {
			cv::flip( image, image, 1 );
		}
		cv::cvtColor( image, image, CV_BGR2RGB );


		if ( mSendBackgrImgEnabled ) {
			//if you comment this, background image will be without the augmented reality
			emit pushBackgrImage( image.clone() );
		}

		emit pushImagemMat( image.clone() );

	}
}
double ClassifierTester::StartTest(const LandmarkCollectionDataPtr& collection,
                                 const std::string& savedTrainedDataFilePath,
                                 const int numberOfIterations,
                                 bool showSteps)
{
  BOOST_ASSERT(savedTrainedDataFilePath.size() != 0);
  BOOST_ASSERT(collection->CollectionSize() != 0);
  
  m_collection = collection;
  m_savedTrainedDataFilePath = savedTrainedDataFilePath;
  m_showSteps = showSteps;
  m_numberOfIterations = numberOfIterations;
  
  m_detector = DetectorPtr(new Detector(m_savedTrainedDataFilePath));
  
  int i = 0;
  collection->EnumerateConstColectionWithCallback([&] (const ImageLandmarkDataPtr& landmarkData, const int index, bool* stop) {
    
//    std::cout<<index<<std::endl;
    
    const cv::Mat image = landmarkData->ImageSource();
    const cv::Mat landmarks = processImage(image, landmarkData->LandmarksMat());
    if (landmarks.empty())
    {
      i++;
//      std::cout<<"Fail image "<<landmarkData->ImagePath()<<std::endl;
    }
    else
    {
      collectStatistics(landmarks, landmarkData->LandmarksMat());
    }
  });
  
  std::cout<<"Number of failed images: "<<i<<std::endl;
  return calculateStatistics();
}
void ImageNodelet::mouseCb(int event, int x, int y, int flags, void* param)
{
  ImageNodelet *this_ = reinterpret_cast<ImageNodelet*>(param);
  // Trick to use NODELET_* logging macros in static function
  boost::function<const std::string&()> getName =
    boost::bind(&ImageNodelet::getName, this_);
  
  if (event == CV_EVENT_LBUTTONDOWN)
  {
    NODELET_WARN_ONCE("Left-clicking no longer saves images. Right-click instead.");
    return;
  }
  if (event != CV_EVENT_RBUTTONDOWN)
    return;
  
  boost::lock_guard<boost::mutex> guard(this_->image_mutex_);

  const cv::Mat image = this_->last_image_;
  if (image.empty())
  {
    NODELET_WARN("Couldn't save image, no data!");
    return;
  }

  std::string filename = (this_->filename_format_ % this_->count_).str();
  if (cv::imwrite(filename, image))
  {
    NODELET_INFO("Saved image %s", filename.c_str());
    this_->count_++;
  }
  else
  {
    /// @todo Show full path, ask if user has permission to write there
    NODELET_ERROR("Failed to save image.");
  }
}
cv::Mat ImageFunctions::getPatch(const cv::Mat &im, const cv::Point2f &pt,
  unsigned int patch_size)
{
  if(im.empty()) return cv::Mat();
  
  cv::Mat ret;
  
  int c0 = cvRound(pt.x) - patch_size/2;
  int r0 = cvRound(pt.y) - patch_size/2;
  int cf = cvRound(pt.x) + patch_size/2 - 1 + (patch_size % 2);
  int rf = cvRound(pt.y) + patch_size/2 - 1 + (patch_size % 2);
  
  if( c0 < im.cols && r0 < im.rows && cf >= 0 && rf >= 0 )
  {
    if(c0 < 0) c0 = 0;
    if(r0 < 0) r0 = 0;
    if(cf >= im.cols) cf = im.cols-1;
    if(rf >= im.rows) rf = im.rows-1;

    // cv::Range is half-open, so add 1 to keep the last row/column
    ret = im( cv::Range(r0, rf + 1), cv::Range(c0, cf + 1) ).clone();
  }
  
  return ret;
}
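A hedged usage sketch (assuming getPatch is callable as a static member, as written): grab a 32x32 patch centred on the strongest corner; border patches come back cropped, far-outside points come back empty.

cv::Mat gray = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
std::vector<cv::Point2f> corners;
cv::goodFeaturesToTrack(gray, corners, 1, 0.01, 10);
if (!corners.empty()) {
    cv::Mat patch = ImageFunctions::getPatch(gray, corners[0], 32);
}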
int StereoMatch::getPointClouds(cv::Mat& disparity, cv::Mat& pointClouds)
{
	if (disparity.empty())
	{
		return 0;
	}

	cv::reprojectImageTo3D(disparity, pointClouds, m_Calib_Mat_Q, true);
	pointClouds *= 1.6;

	// negate y so the cloud uses a y-up convention
	for (int y = 0; y < pointClouds.rows; ++y)
	{
		for (int x = 0; x < pointClouds.cols; ++x)
		{
			cv::Point3f point = pointClouds.at<cv::Point3f>(y,x);
			point.y = -point.y;
			pointClouds.at<cv::Point3f>(y,x) = point;
		}
	}

	return 1;
}
void constMaskCb(const stereo_msgs::DisparityImageConstPtr& msg)
{
    if(!image_rect.empty())
    {
        cv::Mat masked;
        image_rect.copyTo(masked, mask);
        cv_bridge::CvImage masked_msg;
        masked_msg.header = msg->header;
        masked_msg.encoding = sensor_msgs::image_encodings::BGR8;

        //if we want rescale then rescale
        if(scale_factor > 1)
        {
            cv::Mat masked_tmp = masked;
            cv::resize(masked_tmp, masked,
                       cv::Size(masked.cols*scale_factor, masked.rows*scale_factor));
        }
        masked_msg.image = masked;
        cam_pub.publish(*masked_msg.toImageMsg(), camera_info);
    }

    //    ROS_INFO("disparityCb runtime: %f ms",
    //             1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
}
  //---------------------------------------------------------------------------
  void
  OnlineMILAlgorithm::import_image(const cv::Mat & image)
  {
    // We want the internal version of the image to be gray-scale, so let's
    // do that here.  We'll handle cases where the input is either RGB, RGBA,
    // or already gray-scale.  I assume it's already 8-bit.  If not then 
    // an error is thrown.  I'm not going to deal with converting properly
    // from every data type since that shouldn't be happening.

    // Make sure the input image pointer is valid
    if (image.empty())
    {
      std::cerr << "OnlineMILAlgorithm::import_image(...) -- ERROR!  Input image is empty!\n" << std::endl;
      exit(0); // <--- CV_ERROR?
    }

    // Now copy it in appropriately as a gray-scale, 8-bit image
    if (image.channels() == 4)
    {
      cv::cvtColor(image, image_, CV_RGBA2GRAY);
    }
    else if (image.channels() == 3)
    {
      cv::cvtColor(image, image_, CV_RGB2GRAY);
    }
    else if (image.channels() == 1)
    {
      image.copyTo(image_);
    }
    else
    {
      std::cerr << "OnlineMILAlgorithm::import_image(...) -- ERROR!  Invalid number of channels for input image!\n"
                << std::endl;
      exit(0);
    }
  }
bool CopyCvMat8uToQImage(const cv::Mat & srcMat, QImage & destImg)
{
	if (srcMat.empty() || destImg.isNull()) {
		return false;
	}

	assert (srcMat.size().height == destImg.height() && srcMat.size().width == destImg.width());
	assert (srcMat.type() == CV_8UC1);

	// double maxDepth = OpenCvUtility::CalcBiggestDepth8u(srcDepthMat);

	const int nXRes = srcMat.size().width;
	const int nYRes = srcMat.size().height;
	const int srcRowStep = srcMat.step;

	const uchar * srcRowPtr = NULL;
	const uchar * srcDataPtr = NULL;

	srcRowPtr = srcMat.data;

	for ( int y = 0; y < nYRes; ++y, srcRowPtr += srcRowStep ) 
	{
		srcDataPtr = srcRowPtr;
		uchar * imagePtr = destImg.scanLine(y);

		for (int x = 0; x < nXRes; ++x, ++srcDataPtr, imagePtr += 4) 
		{
			uchar value = *srcDataPtr;

			imagePtr[2] = imagePtr[1] = imagePtr[0] = value;
			imagePtr[3] = 0xff;
		}
	}
	
	return true;
}
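A hedged usage sketch: the destination QImage must already exist with matching dimensions and a 4-byte-per-pixel format, since the loop above advances imagePtr by 4 per pixel.

cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);   // CV_8UC1
QImage qimg(gray.cols, gray.rows, QImage::Format_RGB32);
CopyCvMat8uToQImage(gray, qimg);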