Example #1
void Noiser::EstimateNoise()
{
	cout<<"Estimating noise..."<<endl;
	Mat desv_estandar(h,w,CV_32F,Scalar(0));	
	for(int i =0;i<total;i++)
	{
		// standard deviation
		Mat aux(h,w,CV_32F,Scalar(0));
		pow(cleanImage-images[i],2,aux);
		desv_estandar += aux;
	}
	desv_estandar/=total;
	sqrt(desv_estandar,desv_estandar);
	
	double ruido_promedio =  mean(desv_estandar).val[0];
	double ruido_peor;
	minMaxLoc(desv_estandar,NULL,&ruido_peor);

	Mat aux;
	meanStdDev(cleanImage,noArray(),aux);
	double signal = aux.at<double>(0);

	meanStdDev(desv_estandar,noArray(),aux);
	double noise = aux.at<double>(0);

	double SNR = 10*log10(signal/noise);

	cout<<"Ruido Promedio: "<<ruido_promedio<<endl;
	cout<<"Ruido Peor: "<<ruido_peor<<endl;
	cout<<"SNR: "<<SNR<<" db"<<endl;
	this->ready = false;
	

}
Example #2
	void	CTrainNodeCvKNN::train(bool doClean)
	{
#ifdef DEBUG_PRINT_INFO
		printf("\n");
#endif

		// Filling the <samples> and <classes>
		Mat samples, classes;
		for (byte s = 0; s < m_nStates; s++) {						// states
			int nSamples = m_pSamplesAcc->getNumSamples(s);
#ifdef DEBUG_PRINT_INFO		
			printf("State[%d] - %d of %d samples\n", s, nSamples, m_pSamplesAcc->getNumInputSamples(s));
#endif
			samples.push_back(m_pSamplesAcc->getSamplesContainer(s));
			classes.push_back(Mat(nSamples, 1, CV_32FC1, Scalar(s)));
			if (doClean) m_pSamplesAcc->release(s);				// free memory
		} // s
		samples.convertTo(samples, CV_32FC1);

		// Filling <var_type>
		Mat var_type(getNumFeatures() + 1, 1, CV_8UC1, Scalar(ml::VAR_NUMERICAL));		// all inputs are numerical
		var_type.at<byte>(getNumFeatures(), 0) = ml::VAR_CATEGORICAL;

		// Training
		try {
			m_pKNN->train(ml::TrainData::create(samples, ml::ROW_SAMPLE, classes, noArray(), noArray(), noArray(), var_type));
		}
		catch (std::exception &e) {
			printf("EXCEPTION: %s\n", e.what());
			getchar();
			exit(-1);
		}
	}
Example #3
void createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat> &pyr)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
    if(tegra::createLaplacePyr(img, num_levels, pyr))
        return;
#endif

    pyr.resize(num_levels + 1);

    if(img.depth() == CV_8U)
    {
        if(num_levels == 0)
        {
            img.convertTo(pyr[0], CV_16S);
            return;
        }

        Mat downNext;
        Mat current = img;
        pyrDown(img, downNext);

        for(int i = 1; i < num_levels; ++i)
        {
            Mat lvl_up;
            Mat lvl_down;

            pyrDown(downNext, lvl_down);
            pyrUp(downNext, lvl_up, current.size());
            subtract(current, lvl_up, pyr[i-1], noArray(), CV_16S);

            current = downNext;
            downNext = lvl_down;
        }

        {
            Mat lvl_up;
            pyrUp(downNext, lvl_up, current.size());
            subtract(current, lvl_up, pyr[num_levels-1], noArray(), CV_16S);

            downNext.convertTo(pyr[num_levels], CV_16S);
        }
    }
    else
    {
        pyr[0] = img;
        for (int i = 0; i < num_levels; ++i)
            pyrDown(pyr[i], pyr[i + 1]);
        Mat tmp;
        for (int i = 0; i < num_levels; ++i)
        {
            pyrUp(pyr[i + 1], tmp, pyr[i].size());
            subtract(pyr[i], tmp, pyr[i]);
        }
    }
}
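Collapsing the pyramid back into an image is the inverse of the construction above. A minimal sketch, assuming `pyr` holds the CV_16S levels produced by createLaplacePyr (the helper name here is ours, not part of the library):

static void collapseLaplacePyr(std::vector<Mat>& pyr, Mat& dst)
{
    // Walk from the coarsest level down, upsampling and adding the detail band back.
    for (int i = static_cast<int>(pyr.size()) - 1; i > 0; --i)
    {
        Mat up;
        pyrUp(pyr[i], up, pyr[i - 1].size());
        add(pyr[i - 1], up, pyr[i - 1]);
    }
    dst = pyr[0];
}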
Example #4
	static void train_and_print_errs(Ptr<StatModel> model, const Ptr<TrainData>& data)
	{
	    bool ok = model->train(data);
	    if( !ok )
	    {
	        printf("Training failed\n");
	    }
	    else
	    {
	        printf( "train error: %f\n", model->calcError(data, false, noArray()) );
	        printf( "test error: %f\n\n", model->calcError(data, true, noArray()) );
	    }
	}
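A minimal usage sketch for this helper, assuming a CSV file with one header line; the file name and the choice of random trees are illustrative only:

// Hypothetical call site for train_and_print_errs.
Ptr<TrainData> data = TrainData::loadFromCSV("data.csv", 1); // assumed data file
Ptr<RTrees> model = RTrees::create();
train_and_print_errs(model, data);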
Example #5
static std::vector<cv::Mat> KMeanSegmentation(cv::Mat image, int no_of_clusters = 2) {
    cv::Mat samples = asSamplesVectors( image );

    const int iters = 100;
    cv::Mat labels;

    // attempts must be at least 1; the original passed 0, which OpenCV silently clamps
    kmeans(samples, no_of_clusters, labels,
            TermCriteria( TermCriteria::COUNT, iters, 0.0), 1,
            KMEANS_PP_CENTERS, noArray() );

    std::vector<cv::Mat> segmented;
    for( int i = 0; i < no_of_clusters; i++ ) {
        segmented.push_back( cv::Mat::zeros( image.rows, image.cols, CV_8UC3 ));
    }

    bool isFlt = labels.type() == CV_32FC1;
    int index = 0, result = 0;
    for( int y = 0; y < image.rows; y++ ) {
        for( int x = 0; x < image.cols; x++ ) {
            if (isFlt) result = (int)labels.at<float>(index++);
            else       result = labels.at<int>(index++);
            // CV_8UC3 pixels are Vec3b; reading them as Point3i (3 x int32) would overrun the pixel
            segmented.at(result).at<cv::Vec3b>(y, x) = image.at<cv::Vec3b>(y, x);
        }
    }

    return segmented;
}
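asSamplesVectors is not shown here; a plausible sketch, assuming a CV_8UC3 input, flattens each pixel into one float row so kmeans receives an N x 3 sample matrix:

// Assumed helper: one row per pixel, three float columns (B, G, R).
static cv::Mat asSamplesVectors(const cv::Mat& image) {
    cv::Mat samples(image.rows * image.cols, 3, CV_32F);
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++) {
            cv::Vec3b px = image.at<cv::Vec3b>(y, x);
            for (int c = 0; c < 3; c++)
                samples.at<float>(y * image.cols + x, c) = px[c];
        }
    return samples;
}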
Example #6
Vec2f DepthMapJudge::CalculateNeighbours(Vec3f center, int cx, int cy, Mat xyz)
{
	int count = 0;
	const double max_z = 10000;
	for(int y = max(0,cy-ws); y < xyz.rows && y<=cy+ws; y++)
    {
        for(int x = max(0,cx-ws); x < xyz.cols && x<=cx+ws; x++)
        {			
			Vec3f point = xyz.at<Vec3f>(y, x);
            if(fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue; 
			if(norm(point-center)<=radius)
			{
				points.at<float>(0,count) = point[2];
				count++;
			}
        }
    } 
	// create a mask that selects the non-zero values in points
	Mat mask(points.rows,points.cols,CV_8U,Scalar::all(0));
	for(int i=0;i<count;i++){ mask.at<uchar>(0,i)=255;	}
	//calculate standard deviation
	Mat aux;
	meanStdDev(points,noArray(),aux,mask);
	double stdev = aux.at<double>(0);
	//clean points
	bitwise_and(points,0,points);

	return Vec2f(count,stdev);
}
Example #7
void extract_features(
    vector<string>& image_names,
    vector<vector<KeyPoint>>& key_points_for_all,
    vector<Mat>& descriptor_for_all,
    vector<vector<Vec3b>>& colors_for_all
    )
{
    key_points_for_all.clear();
    descriptor_for_all.clear();
    Mat image;

    Ptr<Feature2D> sift = xfeatures2d::SIFT::create(0, 3, 0.04, 10);
    for (auto it = image_names.begin(); it != image_names.end(); ++it)
    {
        image = imread(*it);
        if (image.empty()) continue;

        vector<KeyPoint> key_points;
        Mat descriptor;
        sift->detectAndCompute(image, noArray(), key_points, descriptor);

        if (key_points.size() <= 10) continue;

        key_points_for_all.push_back(key_points);
        descriptor_for_all.push_back(descriptor);

        vector<Vec3b> colors(key_points.size());
        for (int i = 0; i < key_points.size(); ++i)
        {
            Point2f& p = key_points[i].pt;
            colors[i] = image.at<Vec3b>(p.y, p.x);
        }
        colors_for_all.push_back(colors);
    }
}
Example #8
u_int32_t ORBFeatureExtractor::processNewImage(unsigned i_imageId, unsigned i_imgSize,
                                                 char *p_imgData)
{
    Mat img;
    u_int32_t i_ret = ImageLoader::loadImage(i_imgSize, p_imgData, img);
    if (i_ret != OK)
        return i_ret;

    vector<KeyPoint> keypoints;
    Mat descriptors;

    ORB(1000, 1.02, 100)(img, noArray(), keypoints, descriptors);

    unsigned i_nbKeyPoints = 0;
    list<HitForward> imageHits;
    unordered_set<u_int32_t> matchedWords;
    for (unsigned i = 0; i < keypoints.size(); ++i)
    {
        i_nbKeyPoints++;

        // Recording the angle on 16 bits.
        u_int16_t angle = keypoints[i].angle / 360 * (1 << 16);
        u_int16_t x = keypoints[i].pt.x;
        u_int16_t y = keypoints[i].pt.y;

        vector<int> indices(1);
        vector<int> dists(1);
        wordIndex->knnSearch(descriptors.row(i), indices, dists, 1);

        for (unsigned j = 0; j < indices.size(); ++j)
        {
            const unsigned i_wordId = indices[j];
            if (matchedWords.find(i_wordId) == matchedWords.end())
            {
                HitForward newHit;
                newHit.i_wordId = i_wordId;
                newHit.i_imageId = i_imageId;
                newHit.i_angle = angle;
                newHit.x = x;
                newHit.y = y;
                imageHits.push_back(newHit);
                matchedWords.insert(i_wordId);
            }
        }
    }

#if 0
    // Draw keypoints.
    Mat img_res;
    drawKeypoints(img, keypoints, img_res, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

    // Show the image.
    imshow("Keypoints 1", img_res);
    waitKey();
#endif

    // Record the hits.
    return index->addImage(i_imageId, imageHits);
}
Example #9
void DeNoiser::DeNoiseStatic(Mat &image, AlgParameters* params)
{
	if(params==NULL)
	{
		params = new AlgParameters();
		params->P0 = 1;
		params->P1 = 3;
		params->P2 = 1;
		params->P3 = 0;
		params->P4 = 10;
		params->P5 = 0;
		params->P6 = 0;
		params->P7 = 0;
		params->P8 = 0;
		params->P9 = 0;	
	}
	int64 t = getTickCount();	// getTickCount() returns int64; long may be 32-bit
	if(params->P0>0)
	{		
		for(int i = 0;i<params->P1;i++)	medianBlur(image,image,params->P2*2+1);
	}
	if(params->P3 >0)
	{
		if(image_count<hist_size-1)
		{
			accumulate(image,Accum);
			image.copyTo(*images[(hist_size-1)-image_count]);		
			image_count++;
		}
		else
		{
			// accumulate the current frame
			accumulate(image,Accum);
			// store the current frame at the start of the history
			image.copyTo(*images[0]);	
			// output the average of the history
			Accum.convertTo(image,image.type(),1.0/hist_size);
			// at this point the image is ready; the history still has to be shifted
			subtract(Accum,*images[hist_size-1],Accum,noArray(),CV_32F);
			// shift the images
			Mat* last = images[hist_size-1];
			for(int i =hist_size-1;i>0;i--) images[i] = images[i-1];
			images[0] = last;
		}
	}

	t = getTickCount() - t;
	float time = t*1000/getTickFrequency();
	if(first_time)
	{
		printf("Noise time: %fms, Fps: %f\n",time,1000/time);
		first_time = false;
	}
}
Example #10
void GetTrackedPoints(const mat3b & im1, const mat3b & im2, vector<TrackedPoint> & points_out, 
		      int maxCorners, float qualityLevel, float minDistance, int blockSize,
		      int winSize_, int maxLevel, int criteriaN, float criteriaEps) {
#if 1
  const int useHarrisDetector = 0;
  const float k = 0.04f;
  const Size winSize(winSize_, winSize_);
  const TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
					     criteriaN, criteriaEps);
  const double derivLambda = 0;
  const int flags = 0;
  assert(im1.size() == im2.size());
  matb im1gray;
  cvtColor(im1, im1gray, CV_BGR2GRAY);
#ifdef OPENCV_2_1
  Mat mask;
  vector<Point2f> corners1, corners2;
  vector<uchar> status;
  vector<float> err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      mask, blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < (signed)corners1.size(); ++i)
    if (status[i])
      points_out.push_back(TrackedPoint(corners1[i].x, corners1[i].y,
					corners2[i].x, corners2[i].y));
#else
  Mat corners1, corners2, status, err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      noArray(), blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < corners1.size().height; ++i)
    if (status.at<unsigned char>(i,0))
      points_out.push_back(TrackedPoint(corners1.at<Vec2f>(i,0)[0],corners1.at<Vec2f>(i,0)[1],
					corners2.at<Vec2f>(i,0)[0],corners2.at<Vec2f>(i,0)[1]));
#endif
#else
  matb im1_gray, im2_gray;
  cvtColor(im1, im1_gray, CV_BGR2GRAY);
  cvtColor(im2, im2_gray, CV_BGR2GRAY);
  Mat flow_cv(im1.size().height, im1.size().width, CV_32FC2);
  calcOpticalFlowFarneback(im1_gray, im2_gray, flow_cv, 0.5, 5, 11, 10, 5, 1.1, 0);
  
  points_out.clear();
  for (int i = 20; i < im1.size().height-20; i += 20)
    for (int j = 20; j < im1.size().width-20; j += 20) {
      const Vec2f f = flow_cv.at<Vec2f>(i, j);
      points_out.push_back(TrackedPoint(j, i, j+f[0], i+f[1]));
    }
  cout << "n points " << points_out.size() << endl;
#endif
}
Example #11
  void PCAFeature::train_pca()
  {
    // prepare the PCA.
    // (1) collect training examples
    cout << "PCAFeature: collecting" << endl;
    vector<shared_ptr<MetaData> > all_data; // unsupervised so we can do this
    auto load_insert_examples = [&all_data](string direct)
    {
      vector<shared_ptr<MetaData> > some_data = metadata_build_all(direct);
      all_data.insert(all_data.end(),some_data.begin(),some_data.end());
    };
    for(string&test_dir : default_test_dirs())
      load_insert_examples(test_dir);
    //load_insert_examples(default_train_dir());    
    
    // (2) collect the cells for the PCA
    shared_ptr<const ImRGBZ> im0 = cropToCells(*all_data[0]->load_im());
    int winX = im0->cols();
    int winY = im0->rows();
    unique_ptr<SubComputer> hog_for_scale(
      new SubComputer(Size(winX,winY),block_size,block_stride,cell_size));
    int nbins = hog_for_scale->getNBins();
    Mat all_cells(0,nbins,DataType<float>::type);
    for(shared_ptr<MetaData> & metadata : all_data)
    {      
      // compute all cells in the image
      shared_ptr<const ImRGBZ> im = cropToCells(*metadata->load_im());
      vector<float> feats; hog_for_scale->compute(*im,feats);
      assert(feats.size()%nbins == 0);
      for(int cell_iter = 0; cell_iter < feats.size()/nbins; cell_iter++)
      {
	// extract each cell in the image
	auto begin = feats.begin() + nbins*cell_iter;
	auto end = begin + nbins;
	vector<float> cell(begin,end);
	Mat cell_mat(cell); cell_mat = cell_mat.t();
	assert(cell_mat.rows == 1 && cell_mat.cols == nbins);
	all_cells.push_back<float>(cell_mat);
      }
    }
    
    // (3) run the PCA
    cout << "PCAFeature: computing" << endl;
    g_pca_reducer.reset(new cv::PCA(all_cells,noArray(),CV_PCA_DATA_AS_ROW,FEAT_DIM));
    cout << "PCAFeature: ready" << endl;
    
    // (4) save the PCA?
    FileStorage cache_storage("cache/PCA.yml",FileStorage::WRITE);
    cache_storage << "pca_eig_values" << g_pca_reducer->eigenvalues;
    cache_storage << "pca_eig_vectors" << g_pca_reducer->eigenvectors;
    cache_storage << "pca_mean" << g_pca_reducer->mean;
    cache_storage.release();
  }
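Once g_pca_reducer is built, applying it is a one-liner. A sketch of a hypothetical call site (not part of the original):

// Project one HOG cell (1 x nbins row) into the FEAT_DIM-dimensional subspace,
// then reconstruct an approximation for inspection.
Mat reduced  = g_pca_reducer->project(cell_mat);
Mat restored = g_pca_reducer->backProject(reduced);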
Example #12
    FeatureValue FeatureAverageColor::calculateP( cv::InputArray image )
    {
        auto ret = FeatureValue();

        Scalar_< double > avgColor = mean( image, noArray() );

        ret.push_back( avgColor[2] );
        ret.push_back( avgColor[1] );
        ret.push_back( avgColor[0] );

        // cout << "r:" << ret.at( 0 ) << ", g:" << ret.at( 1 ) << ", b:" << ret.at( 2 ) << endl;

        return ret;
    }
Example #13
void akaze_detection(Mat &image){

    vector<KeyPoint> kpts1, kpts2;
    Mat desc1, desc2;

    Ptr<AKAZE> akaze = AKAZE::create();
	
    akaze->detectAndCompute(image, noArray(), kpts1, desc1);
    
    Mat outimg ;
    drawKeypoints(image,kpts1,outimg);	
    imwrite("outimage/akaze_img.png", outimg);

}
Example #14
  //! Set the confidence score of a plate image
  //! Returns 0 if the image is a plate, any other value if it is not
  int PlateJudge::plateSetScore(CPlate& plate) {
    Mat features;
    extractFeature(plate.getPlateMat(), features);

    float score = svm_->predict(features, noArray(), cv::ml::StatModel::Flags::RAW_OUTPUT);

    // score is the distance from the SVM margin: below 0 means it is a plate,
    // above 0 means it is not; the more negative the score, the more likely it is a plate
    plate.setPlateScore(score);

    if (score < 0)
      return 0;
    else
      return -1;
  }
Example #15
void SIFT::buildDoGPyramid( const vector<Mat>& gpyr, vector<Mat>& dogpyr ) const
{
    int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
    dogpyr.resize( nOctaves*(nOctaveLayers + 2) );

    for( int o = 0; o < nOctaves; o++ )
    {
        for( int i = 0; i < nOctaveLayers + 2; i++ )
        {
            const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
            const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
            Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
            subtract(src2, src1, dst, noArray(), DataType<sift_wt>::type);
        }
    }
}
Example #16
        void PositionCalculatorImpl::addMeasurementImpl( InputArray _tvec, InputArray _rvec, const Point2f _pt
                                                         , double /*time*/,
                                                         InputArray _cameraMatrix, InputArray _distortionMatrix )
        {
            Mat tvec = _tvec.getMat();
            Mat rvec = _rvec.getMat();
            Mat camera_matrix = _cameraMatrix.getMat();
            const Mat distortion_matrix = _distortionMatrix.getMat();

            std::vector< Point2f > pts_in, pts_out;
            pts_in.push_back( _pt );
            undistortPoints( pts_in, pts_out, camera_matrix, distortion_matrix, noArray(), camera_matrix );

            Mat los( 3, 1,CV_64F );

            los.at< double >( 0 ) = pts_out[0].x;
            los.at< double >( 1 ) = pts_out[0].y;
            los.at< double >( 2 ) = 1;

            if ( camera_matrix.type() != CV_64F )
                camera_matrix.convertTo( camera_matrix, CV_64F );
            if ( rvec.type() != CV_64F )
                rvec.convertTo( rvec, CV_64F );
            if ( tvec.type() != CV_64F )
                tvec.convertTo( tvec, CV_64F );

            los = camera_matrix.inv() * los;

            Mat camera_rotation;
            if ( rvec.rows == 3 && rvec.cols == 3 )
                camera_rotation = rvec;
            else
                Rodrigues( rvec, camera_rotation );

            if(tvec.rows == 1)
                tvec = tvec.t();

            camera_rotation = camera_rotation.t();
            const Mat camera_translation = ( -camera_rotation * tvec );
            los = camera_rotation * los;

            positions.push_back( camera_translation );
            Mat zero_pos( 3, 1, CV_64F );
            zero_pos.setTo( 0 );
            const Point2d azel = getAzEl( zero_pos, los );
            angles.push_back( azel );
        }
Example #17
namespace cv
{
CV_EXPORTS_W void add(InputArray src1, Scalar src2, OutputArray dst, InputArray mask=noArray(), int dtype=-1);

CV_EXPORTS_W void subtract(InputArray src1, Scalar src2, OutputArray dst, InputArray mask=noArray(), int dtype=-1);

CV_EXPORTS_W void multiply(InputArray src1, Scalar src2, OutputArray dst, double scale=1, int dtype=-1);

CV_EXPORTS_W void divide(InputArray src1, Scalar src2, OutputArray dst, double scale=1, int dtype=-1);

CV_EXPORTS_W void absdiff(InputArray src1, Scalar src2, OutputArray dst);

CV_EXPORTS_W void compare(InputArray src1, Scalar src2, OutputArray dst, int cmpop);

CV_EXPORTS_W void min(InputArray src1, Scalar src2, OutputArray dst);

CV_EXPORTS_W void max(InputArray src1, Scalar src2, OutputArray dst);

}
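A small usage sketch of these Scalar overloads (the input file name is an assumption):

// Per-channel arithmetic against a Scalar; dtype -1 keeps the source depth.
cv::Mat img = cv::imread("input.png");   // assumed 8-bit BGR input
cv::Mat darker, diff;
cv::subtract(img, cv::Scalar(40, 40, 40), darker, cv::noArray(), -1);
cv::absdiff(img, cv::Scalar(128, 128, 128), diff);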
Example #18
void ObjectRecognition::loadFeatures(Mat& inImage, vector< KeyPoint >& keypoints, vector< std::vector< float > >& features, Mat& descriptors)
{
	features.clear();
	keypoints.clear();
	if(input_type==0) //input_type = 0 camera
		cvtColor(inImage, inImage, CV_RGB2GRAY);


#if FEATURE_EXTRATION
	//---------------using SIFT to get features descriptors-------------------------
	//cout<< "...Extracting SIFT features..." << endl;

	initModule_nonfree();
	SIFT sift(1, 3, 0.04, 10, 1.0);
	sift(inImage, noArray(), keypoints, descriptors);


	//     vector<vector<float> > vdesc;
	//     vdesc.reserve(descriptors.rows);
	for (int i=0; i<descriptors.rows; i++)
	{
		features.push_back(descriptors.row(i));
	}
	//     cout<< "descriptors: " << vdesc.size() << "  " << vdesc[0].size() << endl;

#else
	//-----------using SURF to get features descriptors------------------------
	vector<float> descriptors;
	cv::Mat mask;
	cv::SURF surf(400, 4, 2, EXTENDED_SURF);
	surf(inImage, mask, keypoints, descriptors);	// was "inimage", which is not declared
	features.push_back(vector<vector<float> >());
	changeStructure(descriptors, features.back(), surf.descriptorSize());
#endif

	//----------------------------display keypoints--------------------------  
	//     drawKeypoints(image, keypoints, image, Scalar(255,0,0));
	//     imshow("clusters", image);
	//     waitKey();

}
Example #19
void LucasKanadeOpticalFlow(Mat& previous_gray_frame, Mat& gray_frame, Mat& display_image)
{
	Size img_sz = previous_gray_frame.size();
	int win_size = 10;
	cvtColor(previous_gray_frame, display_image, CV_GRAY2BGR);
	vector<Point2f> previous_features, current_features;
	const int MAX_CORNERS = 500;
	goodFeaturesToTrack(previous_gray_frame, previous_features, MAX_CORNERS, 0.05, 5, noArray(), 3, false, 0.04);
	cornerSubPix(previous_gray_frame, previous_features, Size(win_size, win_size), Size(-1,-1),
                 TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
    vector<uchar> features_found;
	calcOpticalFlowPyrLK(previous_gray_frame, gray_frame, previous_features, current_features, features_found, noArray(),
                         Size(win_size*4+1,win_size*4+1), 5,
                         TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ));
    for( int i = 0; i < (int)previous_features.size(); i++ )
	{
		if( !features_found[i] )
			continue;
        circle(display_image, previous_features[i], 1, Scalar(0,0,255));
		line(display_image, previous_features[i], current_features[i], Scalar(0,255,0));   
	}
}
Example #20
void AdaptiveManifoldFilterN::computeClusters(Mat1b& cluster, Mat1b& cluster_minus, Mat1b& cluster_plus)
{
    Mat difEtaSrc;
    {
        vector<Mat> eta_difCn(jointCnNum);
        for (int i = 0; i < jointCnNum; i++)
            subtract(jointCn[i], etaFull[i], eta_difCn[i]);

        merge(eta_difCn, difEtaSrc);
        difEtaSrc = difEtaSrc.reshape(1, (int)difEtaSrc.total());
        CV_DbgAssert(difEtaSrc.cols == jointCnNum);
    }

    Mat1f initVec(1, jointCnNum);
    if (useRNG)
    {
        rnd.fill(initVec, RNG::UNIFORM, -0.5, 0.5);
    }
    else
    {
        for (int i = 0; i < (int)initVec.total(); i++)
            initVec(0, i) = (i % 2 == 0) ? 0.5f : -0.5f;
    }

    Mat1f eigenVec(1, jointCnNum);
    computeEigenVector(difEtaSrc, cluster, eigenVec, num_pca_iterations_, initVec);

    Mat1f difOreientation;
    gemm(difEtaSrc, eigenVec, 1, noArray(), 0, difOreientation, GEMM_2_T);
    difOreientation = difOreientation.reshape(1, srcSize.height);
    CV_DbgAssert(difOreientation.size() == srcSize);

    compare(difOreientation, 0, cluster_minus, CMP_LT);
    bitwise_and(cluster_minus, cluster, cluster_minus);

    compare(difOreientation, 0, cluster_plus, CMP_GE);
    bitwise_and(cluster_plus, cluster, cluster_plus);
}
Example #21
  CV_EXPORTS_W
  void
  depthTo3dSparse(InputArray depth, InputArray in_K, InputArray in_points, OutputArray points3d);

  /** Converts a depth image to an organized set of 3d points.
   * The coordinate system is x pointing left, y down and z away from the camera
   * @param depth the depth image (if given as short int CV_16U, it is assumed to be the depth in millimeters
   *              (as done with the Microsoft Kinect); otherwise, if given as CV_32F or CV_64F, it is assumed to be in meters)
   * @param K The calibration matrix
   * @param points3d the resulting 3d points. They are of the same depth as `depth` if it is CV_32F or CV_64F, and of the
   *        depth of `K` if `depth` is of depth CV_16U
   * @param mask the mask of the points to consider (can be empty)
   */
  CV_EXPORTS_W
  void
  depthTo3d(InputArray depth, InputArray K, OutputArray points3d, InputArray mask = noArray());

  /** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
   * by 1000 to get a depth in meters, and the values 0 are converted to std::numeric_limits<float>::quiet_NaN()
   * Otherwise, the image is simply converted to floats
   * @param in the depth image (if given as short int CV_16U, it is assumed to be the depth in millimeters
   *              (as done with the Microsoft Kinect); otherwise, it is assumed to be in meters)
   * @param depth the desired output depth (floats or double)
   * @param out The rescaled float depth image
   */
  CV_EXPORTS_W
  void
  rescaleDepth(InputArray in, int depth, OutputArray out);

  /** Object that can compute planes in an image
   */
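A minimal usage sketch of rescaleDepth and depthTo3d as declared above; the intrinsics and the file name are assumed values:

// Rescale a Kinect-style CV_16U depth map to meters, then back-project every
// pixel to a CV_32FC3 point image with an assumed pinhole calibration K.
cv::Mat depth_raw = cv::imread("depth.png", cv::IMREAD_UNCHANGED); // CV_16UC1 assumed
cv::Mat depth_m;
rescaleDepth(depth_raw, CV_32F, depth_m);       // millimeters -> meters, 0 -> NaN
cv::Matx33f K(525.f,   0.f, 319.5f,             // assumed Kinect-like intrinsics
                0.f, 525.f, 239.5f,
                0.f,   0.f,   1.f);
cv::Mat points3d;
depthTo3d(depth_raw, K, points3d);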
Example #22
namespace aruco {

//! @addtogroup aruco
//! @{


/**
 * @brief ChArUco board
 * Specific class for ChArUco boards. A ChArUco board is a planar board where the markers are placed
 * inside the white squares of a chessboard. The benefit of ChArUco boards is that they provide
 * both ArUco marker versatility and chessboard corner precision, which is important for
 * calibration and pose estimation.
 * This class also allows the easy creation and drawing of ChArUco boards.
 */
class  CharucoBoard : public Board {

    public:
    // vector of chessboard 3D corners precalculated
    std::vector< Point3f > chessboardCorners;

    // for each charuco corner, nearest marker id and nearest marker corner id of each marker
    std::vector< std::vector< int > > nearestMarkerIdx;
    std::vector< std::vector< int > > nearestMarkerCorners;

    /**
     * @brief Draw a ChArUco board
     *
     * @param outSize size of the output image in pixels.
     * @param img output image with the board. The size of this image will be outSize
     * and the board will be on the center, keeping the board proportions.
     * @param marginSize minimum margins (in pixels) of the board in the output image
     * @param borderBits width of the marker borders.
     *
     * This function returns the image of the ChArUco board, ready to be printed.
     */
    void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);


    /**
     * @brief Create a CharucoBoard object
     *
     * @param squaresX number of chessboard squares in X direction
     * @param squaresY number of chessboard squares in Y direction
     * @param squareLength chessboard square side length (normally in meters)
     * @param markerLength marker side length (same unit as squareLength)
     * @param dictionary dictionary of markers indicating the type of markers.
     * The first markers in the dictionary are used to fill the white chessboard squares.
     * @return the output CharucoBoard object
     *
     * This function creates a CharucoBoard object given the number of squares in each direction
     * and the size of the markers and chessboard squares.
     */
    static CharucoBoard create(int squaresX, int squaresY, float squareLength, float markerLength,
                               Dictionary dictionary);

    /**
      *
      */
    Size getChessboardSize() const { return Size(_squaresX, _squaresY); }

    /**
      *
      */
    float getSquareLength() const { return _squareLength; }

    /**
      *
      */
    float getMarkerLength() const { return _markerLength; }

    private:
    void _getNearestMarkerCorners();

    // number of chessboard squares in X and Y directions
    int _squaresX, _squaresY;

    // size of chessboard squares side (normally in meters)
    float _squareLength;

    // marker side length (normally in meters)
    float _markerLength;
};




/**
 * @brief Interpolate position of ChArUco board corners
 * @param markerCorners vector of already detected markers corners. For each marker, its four
 * corners are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
 * dimensions of this array should be Nx4. The order of the corners should be clockwise.
 * @param markerIds list of identifiers for each marker in corners
 * @param image input image necessary for corner refinement. Note that markers are not detected and
 * should be sent in corners and ids parameters.
 * @param board layout of ChArUco board.
 * @param charucoCorners interpolated chessboard corners
 * @param charucoIds interpolated chessboard corners identifiers
 * @param cameraMatrix optional 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
 * @param distCoeffs optional vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 *
 * This function receives the detected markers and returns the 2D position of the chessboard corners
 * from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
 * the process is based on an approximate pose estimation; otherwise it is based on local homography.
 * Only visible corners are returned. For each corner, its corresponding identifier is
 * also returned in charucoIds.
 * The function returns the number of interpolated corners.
 */
 int interpolateCornersCharuco(InputArrayOfArrays markerCorners, InputArray markerIds,
                                         InputArray image, const CharucoBoard &board,
                                         OutputArray charucoCorners, OutputArray charucoIds,
                                         InputArray cameraMatrix = noArray(),
                                         InputArray distCoeffs = noArray());




/**
 * @brief Pose estimation for a ChArUco board given some of their corners
 * @param charucoCorners vector of detected charuco corners
 * @param charucoIds list of identifiers for each corner in charucoCorners
 * @param board layout of ChArUco board.
 * @param cameraMatrix input 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
 * @param distCoeffs vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
 * (@sa Rodrigues).
 * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
 *
 * This function estimates a Charuco board pose from some detected corners.
 * The function checks if the input corners are enough and valid to perform pose estimation.
 * If pose estimation is valid, returns true, else returns false.
 */
 bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds,
                                         CharucoBoard &board, InputArray cameraMatrix,
                                         InputArray distCoeffs, OutputArray rvec, OutputArray tvec);




/**
 * @brief Draws a set of Charuco corners
 * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
 * altered.
 * @param charucoCorners vector of detected charuco corners
 * @param charucoIds list of identifiers for each corner in charucoCorners
 * @param cornerColor color of the square surrounding each corner
 *
 * This function draws a set of detected Charuco corners. If identifiers vector is provided, it also
 * draws the id of each corner.
 */
 void drawDetectedCornersCharuco(InputOutputArray image, InputArray charucoCorners,
                                           InputArray charucoIds = noArray(),
                                           Scalar cornerColor = Scalar(255, 0, 0));



/**
 * @brief Calibrate a camera using Charuco corners
 *
 * @param charucoCorners vector of detected charuco corners per frame
 * @param charucoIds list of identifiers for each corner in charucoCorners per frame
 * @param board Marker Board layout
 * @param imageSize input image size
 * @param cameraMatrix Output 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
 * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
 * initialized before calling the function.
 * @param distCoeffs Output vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
 * k-th translation vector (see the next output parameter description) brings the board pattern
 * from the model coordinate space (in which object points are specified) to the world coordinate
 * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 * @param flags Different flags for the calibration process (@sa calibrateCamera)
 * @param criteria Termination criteria for the iterative optimization algorithm.
 *
 * This function calibrates a camera using a set of corners of a Charuco Board. The function
 * receives a list of detected corners and their identifiers from several views of the Board.
 * The function returns the final re-projection error.
 */
 double calibrateCameraCharuco(
    InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const CharucoBoard &board,
    Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
    OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,
    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));




/**
 * @brief Detect ChArUco Diamond markers
 *
 * @param image input image necessary for corner subpixel.
 * @param markerCorners list of detected marker corners from detectMarkers function.
 * @param markerIds list of marker ids in markerCorners.
 * @param squareMarkerLengthRate rate between square and marker length:
 * squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
 * @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
 * is the same as in marker corners: top left, top right, bottom right and bottom left. Same
 * format as the corners returned by detectMarkers (e.g std::vector<std::vector<cv::Point2f> > ).
 * @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
 * type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
 * diamond.
 * @param cameraMatrix Optional camera calibration matrix.
 * @param distCoeffs Optional camera distortion coefficients.
 *
 * This function detects Diamond markers from the previous detected ArUco markers. The diamonds
 * are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
 * are provided, the diamond search is based on reprojection. If not, diamond search is based on
 * homography. Homography is faster than reprojection but can slightly reduce the detection rate.
 */
 void detectCharucoDiamond(InputArray image, InputArrayOfArrays markerCorners,
                                     InputArray markerIds, float squareMarkerLengthRate,
                                     OutputArrayOfArrays diamondCorners, OutputArray diamondIds,
                                     InputArray cameraMatrix = noArray(),
                                     InputArray distCoeffs = noArray());



/**
 * @brief Draw a set of detected ChArUco Diamond markers
 *
 * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
 * altered.
 * @param diamondCorners positions of diamond corners in the same format returned by
 * detectCharucoDiamond(). (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
 * the dimensions of this array should be Nx4. The order of the corners should be clockwise.
 * @param diamondIds vector of identifiers for diamonds in diamondCorners, in the same format
 * returned by detectCharucoDiamond() (e.g. std::vector<Vec4i>).
 * Optional, if not provided, ids are not painted.
 * @param borderColor color of marker borders. Rest of colors (text color and first corner color)
 * are calculated based on this one.
 *
 * Given an array of detected diamonds, this function draws them in the image. The marker borders
 * are painted, as are the marker identifiers if provided.
 * Useful for debugging purposes.
 */
 void drawDetectedDiamonds(InputOutputArray image, InputArrayOfArrays diamondCorners,
                                     InputArray diamondIds = noArray(),
                                     Scalar borderColor = Scalar(0, 0, 255));




/**
 * @brief Draw a ChArUco Diamond marker
 *
 * @param dictionary dictionary of markers indicating the type of markers.
 * @param ids list of 4 ids for each ArUco marker in the ChArUco marker.
 * @param squareLength size of the chessboard squares in pixels.
 * @param markerLength size of the markers in pixels.
 * @param img output image with the marker. The size of this image will be
 * 3*squareLength + 2*marginSize.
 * @param marginSize minimum margins (in pixels) of the marker in the output image
 * @param borderBits width of the marker borders.
 *
 * This function returns the image of a ChArUco marker, ready to be printed.
 */
 void drawCharucoDiamond(Dictionary dictionary, Vec4i ids, int squareLength,
                                   int markerLength, OutputArray img, int marginSize = 0,
                                   int borderBits = 1);




//! @}
}
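A usage sketch tying the declarations above together; `dictionary`, `image`, `markerCorners`, `markerIds`, `cameraMatrix` and `distCoeffs` are assumed to come from a prior aruco::detectMarkers step and an existing calibration:

// Interpolate ChArUco corners from detected markers, then estimate the board pose.
CharucoBoard board = CharucoBoard::create(5, 7, 0.04f, 0.02f, dictionary);
std::vector<cv::Point2f> charucoCorners;
std::vector<int> charucoIds;
int n = interpolateCornersCharuco(markerCorners, markerIds, image, board,
                                  charucoCorners, charucoIds,
                                  cameraMatrix, distCoeffs);
if (n >= 4)   // pose needs a handful of corners; 4 is an illustrative minimum
{
    cv::Mat rvec, tvec;
    bool valid = estimatePoseCharucoBoard(charucoCorners, charucoIds, board,
                                          cameraMatrix, distCoeffs, rvec, tvec);
    (void)valid; // false when the corners were not enough/valid for pose estimation
}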
Example #23
namespace cv {
    
    template <typename T> inline int cvType(void) { return CV_8UC1; }
    template<> inline int cvType<int16_t>(void) { return CV_16SC1; }
    template<> inline int cvType<int32_t>(void) { return CV_32SC1; }
    template<> inline int cvType<float>(void) { return CV_32FC1; }
    template<> inline int cvType<double>(void) { return CV_64FC1; }

    
    void make_mask( InputArray image, InputOutputArray mask, double thres=0, int smooth=5, bool filterLarger=false, bool invert=false );
    

enum
{
    MOTION_TRANSLATION = 0,
    MOTION_EUCLIDEAN   = 1,
    MOTION_AFFINE      = 2,
    MOTION_HOMOGRAPHY  = 3
};

/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .

@param templateImage single-channel template image; CV_8U or CV_32F array.
@param inputImage single-channel input image which should be warped with the final warpMatrix in
order to provide an image similar to templateImage, same type as templateImage.
@param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp).
@param motionType parameter, specifying the type of motion:
 -   **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is \f$2\times 3\f$ with
     the first \f$2\times 2\f$ part being the unity matrix and the remaining two parameters being
     estimated.
 -   **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three
     parameters are estimated; warpMatrix is \f$2\times 3\f$.
 -   **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated;
     warpMatrix is \f$2\times 3\f$.
 -   **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are
     estimated; `warpMatrix` is \f$3\times 3\f$.
@param criteria parameter, specifying the termination criteria of the ECC algorithm;
criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion).
Default values are shown in the declaration above.
@param inputMask An optional mask to indicate valid values of inputImage.

The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion
(@cite EP08), that is

\f[\texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))\f]

where

\f[\begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}\f]

(the equation holds with homogeneous coordinates for homography). It returns the final enhanced
correlation coefficient, that is the correlation coefficient between the template image and the
final warped input image. When a \f$3\times 3\f$ matrix is given with motionType =0, 1 or 2, the third
row is ignored.

Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
area-based alignment that builds on intensity similarities. In essence, the function updates the
initial transformation that roughly aligns the images. If this information is missing, the identity
warp (unity matrix) should be given as input. Note that if images undergo strong
displacements/rotations, an initial transformation that roughly aligns the images is necessary
(e.g., a simple euclidean/similarity transform that allows for the images showing the same image
content approximately). Use inverse warping in the second image to take an image close to the first
one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV
sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws
an exception if the algorithm does not converge.

@sa
estimateRigidTransform, findHomography
 */
double findTransformECC( InputArray templateImage, InputArray inputImage,
                         InputOutputArray warpMatrix, int motionType = MOTION_AFFINE,
                         TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001),
                         InputArray inputMask = noArray());


} // cv
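A usage sketch following the documentation above; the file names are assumptions:

// Align inputImage to templateImage with an affine ECC warp, starting from the
// identity transform, then inverse-warp the input as the docs suggest.
cv::Mat tmpl  = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
cv::Mat input = cv::imread("input.png",    cv::IMREAD_GRAYSCALE);
cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);   // identity initial guess
double cc = findTransformECC(tmpl, input, warp, MOTION_AFFINE);
cv::Mat aligned;
cv::warpAffine(input, aligned, warp, tmpl.size(),
               cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
// cc holds the final enhanced correlation coefficient.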
Example #24
unsigned test(CClassifier * cl, CCaptcha * captcha)
{
	char captcha_predict[nic];
	unsigned u, v, max_area_ind = 0;	// initialize in case no contour is found
	double area, max_area;
	Mat img = (* captcha)();
	Size size = img.size();
	Mat kernel(3, 3, CV_64FC1);
	Mat blr, median, filter, lpl, sum, temp, nimg, thr;
	vector<Mat> ch;
	vector<vector<Point> > cnt;

	kernel.at<double>(0, 0) = -0.1;
	kernel.at<double>(0, 1) = -0.1;
	kernel.at<double>(0, 2) = -0.1;
	kernel.at<double>(1, 0) = -0.1;
	kernel.at<double>(1, 1) = 2;
	kernel.at<double>(1, 2) = -0.1;
	kernel.at<double>(2, 0) = -0.1;
	kernel.at<double>(2, 1) = -0.1;
	kernel.at<double>(2, 2) = -0.1;

	medianBlur(img, median, 5);
	filter2D(median, filter, -1, kernel);
	Laplacian(filter, lpl, CV_32F, 5);
	threshold(lpl, thr, 150, 255, THRESH_BINARY);

	split(thr, ch);
	add(ch[0], ch[1], temp, noArray());
	add(temp, ch[2], sum, noArray(), CV_8U);

	for(u = 0; u < nic; u++)
	{
		try
		{
			Mat nimg = sum(Range(0, size.height), Range(u * size.width / nic, (u + 1) * size.width / nic)).clone();
			Mat tnimg = nimg.clone();
			temp = nimg.clone();
			Mat vc = vec(nimg);

			captcha_predict[u] = captcha->alphabet(cl->predict(vc));

			printf("%c\n", captcha_predict[u]);

			Mat cnt_img(size.height, size.width / nic, CV_8UC1);
			findContours(temp, cnt, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS);

			for(v = 0, max_area = 0; v < cnt.size(); v++)
			{
				area = contourArea(cnt[v]);

				if(area > max_area)
				{
					max_area = area;
					max_area_ind = v;
				}
			}

			vector<vector<Point> > approx_cnt;
			approx_cnt.push_back(vector<Point>());
			approxPolyDP(cnt[max_area_ind], approx_cnt[0], 2, true);

			rectangle(cnt_img, Point(0, 0), Point(size.width, size.height), Scalar::all(0), CV_FILLED);
			drawContours(cnt_img, approx_cnt, -1, Scalar::all(255));

			namedWindow("img", CV_WINDOW_NORMAL);
			namedWindow("nimg", CV_WINDOW_NORMAL);
			namedWindow("median", CV_WINDOW_NORMAL);
			namedWindow("filter", CV_WINDOW_NORMAL);
			namedWindow("laplacian", CV_WINDOW_NORMAL);
			namedWindow("thres", CV_WINDOW_NORMAL);
			namedWindow("sum", CV_WINDOW_NORMAL);
			namedWindow("cnt_img", CV_WINDOW_NORMAL);

			imshow("img", img);
			imshow("nimg", tnimg);
			imshow("median", median);
			imshow("filter", filter);
			imshow("laplacian", lpl);
			imshow("thres", thr);
			imshow("sum", sum);
			imshow("cnt_img", cnt_img);

			waitKey();

			destroyAllWindows();
		}
		catch(...)
		{
			;
		}
	}

	return captcha->check(captcha_predict);
}
Example #25
public:

  explicit SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());

  virtual void read( const FileNode& fn );
  virtual void write( FileStorage& fs ) const;

protected:
  struct CV_EXPORTS Center
  {
      Point2d location;
      double radius;
      double confidence;
  };

  virtual void detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() );
  virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;

  Params params;
};

/*
*  SimpleBlobDetector
*/
SimpleBlobDetector::Params::Params()
{
    thresholdStep = 10;
    minThreshold = 50;
    maxThreshold = 220;
    minRepeatability = 2;
    minDistBetweenBlobs = 10;
Example #26
                               Scalar value = Scalar(), Stream& stream = Stream::Null());

//! @} cudaarithm_core

//! @addtogroup cudaarithm_reduce
//! @{

/** @brief Returns the norm of a matrix (or difference of two matrices).

@param src1 Source matrix. Any matrices except 64F are supported.
@param normType Norm type. NORM_L1 , NORM_L2 , and NORM_INF are supported for now.
@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.

@sa norm
 */
CV_EXPORTS double norm(InputArray src1, int normType, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcNorm(InputArray src, OutputArray dst, int normType, InputArray mask = noArray(), Stream& stream = Stream::Null());

/** @brief Returns the norm of the difference of two matrices.

@param src1 Source matrix. Any matrices except 64F are supported.
@param src2 Second source matrix (if any) with the same size and type as src1.
@param normType Norm type. NORM_L1 , NORM_L2 , and NORM_INF are supported for now.

@sa norm
 */
CV_EXPORTS double norm(InputArray src1, InputArray src2, int normType=NORM_L2);
/** @overload */
CV_EXPORTS void calcNormDiff(InputArray src1, InputArray src2, OutputArray dst, int normType=NORM_L2, Stream& stream = Stream::Null());
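A minimal sketch of the two reductions above (the host inputs are assumed):

// Upload two host images and compare them on the GPU with the L2 norm.
cv::cuda::GpuMat d_a, d_b;
d_a.upload(host_a);   // host_a, host_b: assumed same-size CV_8UC1 images
d_b.upload(host_b);
double l2   = cv::cuda::norm(d_a, d_b, cv::NORM_L2); // norm of the difference
double linf = cv::cuda::norm(d_a, cv::NORM_INF);     // norm of a single matrix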
Example #27
void SIFT::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
{
    (*this)(image, mask, keypoints, noArray());
}
Example #28
    the image). \f$\theta\f$ is the line rotation angle in radians (
    \f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ).
    @param stream Stream for the asynchronous version.

    @sa HoughLines
     */
    virtual void detect(InputArray src, OutputArray lines, Stream& stream = Stream::Null()) = 0;

    /** @brief Downloads results from cuda::HoughLinesDetector::detect to host memory.

    @param d_lines Result of cuda::HoughLinesDetector::detect .
    @param h_lines Output host array.
    @param h_votes Optional output array for lines' votes.
    @param stream Stream for the asynchronous version.
     */
    virtual void downloadResults(InputArray d_lines, OutputArray h_lines, OutputArray h_votes = noArray(), Stream& stream = Stream::Null()) = 0;

    virtual void setRho(float rho) = 0;
    virtual float getRho() const = 0;

    virtual void setTheta(float theta) = 0;
    virtual float getTheta() const = 0;

    virtual void setThreshold(int threshold) = 0;
    virtual int getThreshold() const = 0;

    virtual void setDoSort(bool doSort) = 0;
    virtual bool getDoSort() const = 0;

    virtual void setMaxLines(int maxLines) = 0;
    virtual int getMaxLines() const = 0;
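A usage sketch of this interface; the factory function and the edge-map input follow the standard cudaimgproc API and are assumptions here:

// Detect lines on a binary edge image already uploaded to the GPU.
cv::Ptr<cv::cuda::HoughLinesDetector> hough =
    cv::cuda::createHoughLinesDetector(1.0f, (float)(CV_PI / 180.0), 100);
cv::cuda::GpuMat d_lines;
hough->detect(d_edges, d_lines);       // d_edges: assumed CV_8UC1 edge map
cv::Mat h_lines, h_votes;
hough->downloadResults(d_lines, h_lines, h_votes);  // rows of (rho, theta) pairs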
Example #29
void cv::calcMotionGradient( InputArray _mhi, OutputArray _mask,
                             OutputArray _orientation,
                             double delta1, double delta2,
                             int aperture_size )
{
    static int runcase = 0; runcase++;

    Mat mhi = _mhi.getMat();
    Size size = mhi.size();

    _mask.create(size, CV_8U);
    _orientation.create(size, CV_32F);

    Mat mask = _mask.getMat();
    Mat orient = _orientation.getMat();

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( Error::StsOutOfRange, "aperture_size must be 3, 5 or 7" );

    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( Error::StsOutOfRange, "both delta's must be positive" );

    if( mhi.type() != CV_32FC1 )
        CV_Error( Error::StsUnsupportedFormat,
                 "MHI must be a single-channel floating-point image" );

    if( orient.data == mhi.data )
    {
        _orientation.release();
        _orientation.create(size, CV_32F);
        orient = _orientation.getMat();
    }

    if( delta1 > delta2 )
        std::swap(delta1, delta2);

    float gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float min_delta = (float)delta1;
    float max_delta = (float)delta2;

    Mat dX_min, dY_max;

    // calc Dx and Dy
    Sobel( mhi, dX_min, CV_32F, 1, 0, aperture_size, 1, 0, BORDER_REPLICATE );
    Sobel( mhi, dY_max, CV_32F, 0, 1, aperture_size, 1, 0, BORDER_REPLICATE );

    int x, y;

    if( mhi.isContinuous() && orient.isContinuous() && mask.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        const float* dX_min_row = dX_min.ptr<float>(y);
        const float* dY_max_row = dY_max.ptr<float>(y);
        float* orient_row = orient.ptr<float>(y);
        uchar* mask_row = mask.ptr<uchar>(y);

        fastAtan2(dY_max_row, dX_min_row, orient_row, size.width, true);

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row[x];
            float dX = dX_min_row[x];

            if( std::abs(dX) < gradient_epsilon && std::abs(dY) < gradient_epsilon )
            {
                mask_row[x] = (uchar)0;
                orient_row[x] = 0.f;
            }
            else
                mask_row[x] = (uchar)1;
        }
    }

    erode( mhi, dX_min, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );
    dilate( mhi, dY_max, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        const float* dX_min_row = dX_min.ptr<float>(y);
        const float* dY_max_row = dY_max.ptr<float>(y);
        float* orient_row = orient.ptr<float>(y);
        uchar* mask_row = mask.ptr<uchar>(y);

        for( x = 0; x < size.width; x++ )
        {
            float d0 = dY_max_row[x] - dX_min_row[x];

            if( mask_row[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row[x] = (uchar)0;
                orient_row[x] = 0.f;
            }
        }
    }
}
Example #30
void SIFT::operator()(InputArray _image, InputArray _mask,
                      vector<KeyPoint>& keypoints) const
{
    (*this)(_image, _mask, keypoints, noArray());
}