void GaussSeidelSolver::computeNeighborAdjust(Mat heightMat, Mat& adjust) {
	int numRow = heightMat.rows;
	int numCol = heightMat.cols;

	adjust = Mat::zeros(numRow, numCol, CV_32FC1);

	Mat heightSubMat;
	Mat adjustSubMat;
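	// Accumulate the four shifted copies of heightMat (the neighbour below, above,
	// to the right and to the left of each cell). The row/col ranges below are views
	// into adjust, so += writes directly into it.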

	heightSubMat = heightMat.rowRange(1, numRow);
	adjustSubMat = adjust.rowRange(0, numRow-1);
	adjustSubMat += heightSubMat;

	heightSubMat = heightMat.rowRange(0, numRow-1);
	adjustSubMat = adjust.rowRange(1, numRow);
	adjustSubMat += heightSubMat;

	heightSubMat = heightMat.colRange(1, numCol);
	adjustSubMat = adjust.colRange(0, numCol-1);
	adjustSubMat += heightSubMat;

	heightSubMat = heightMat.colRange(0, numCol-1);
	adjustSubMat = adjust.colRange(1, numCol);
	adjustSubMat += heightSubMat;

	adjust /= 4.0;
}
Example #2
File: ecc.cpp Project: 4auka/opencv
static void image_jacobian_affine_ECC(const Mat& src1, const Mat& src2,
                                      const Mat& src3, const Mat& src4,
                                      Mat& dst)
{

    CV_Assert(src1.size() == src2.size());
    CV_Assert(src1.size() == src3.size());
    CV_Assert(src1.size() == src4.size());

    CV_Assert(src1.rows == dst.rows);
    CV_Assert(dst.cols == (6*src1.cols));

    CV_Assert(dst.type() == CV_32FC1);


    const int w = src1.cols;

    //compute Jacobian blocks (6 blocks)

    dst.colRange(0,w) = src1.mul(src3);//1
    dst.colRange(w,2*w) = src2.mul(src3);//2
    dst.colRange(2*w,3*w) = src1.mul(src4);//3
    dst.colRange(3*w,4*w) = src2.mul(src4);//4
    src1.copyTo(dst.colRange(4*w,5*w));//5
    src2.copyTo(dst.colRange(5*w,6*w));//6
}
Example #3
void SetBorder2Zero(Mat &mat, int bW, int bH)
{
	mat.rowRange(0, bH).setTo(0);
	mat.rowRange(mat.rows - bH, mat.rows).setTo(0);
	mat.colRange(0, bW).setTo(0);
	mat.colRange(mat.cols - bW, mat.cols).setTo(0);
}
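A tiny usage sketch (my own illustration, not from the original source): clear a 2-pixel border of an 8x8 matrix.

Mat m = Mat::ones(8, 8, CV_8UC1);
SetBorder2Zero(m, 2, 2);   // rows 0-1 and 6-7, columns 0-1 and 6-7 are now zero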
Example #4
void NNClassifier::addToNewSamples(const Mat &patch, const int c)
{
    const int newSamplesNum = 100;
    
    Mat &samples = (c == CLASS_POS ? newSamplesP : newSamplesN);
    int n = samples.cols / NN_PATCH_SIZE;
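    // samples holds NN_PATCH_SIZE x NN_PATCH_SIZE patches stored side by side,
    // so n is the number of patches currently buffered; once it exceeds
    // newSamplesNum, the oldest (leftmost) patch is dropped below.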

    Mat tmp;
    if(n > newSamplesNum)
    {
        tmp = Mat(NN_PATCH_SIZE, samples.cols, CV_8UC3);
        
        samples.colRange(NN_PATCH_SIZE, samples.cols).copyTo(tmp.colRange(0, samples.cols - NN_PATCH_SIZE));
    }
    else
    {
        tmp = Mat(NN_PATCH_SIZE, samples.cols + NN_PATCH_SIZE, CV_8UC3);
        
        if(samples.cols) samples.colRange(0, samples.cols).copyTo(tmp.colRange(0, samples.cols));
    }
    
    samples = tmp;
    
    Mat _patch = patch.clone();
    normalize(_patch, _patch, 0, 255, NORM_MINMAX);
    _patch.convertTo(_patch, CV_8U);
    cvtColor(_patch, samples.colRange(samples.cols - NN_PATCH_SIZE, samples.cols), CV_GRAY2BGR);
}
Example #5
File: ecc.cpp Project: 4auka/opencv
static void image_jacobian_euclidean_ECC(const Mat& src1, const Mat& src2,
                                         const Mat& src3, const Mat& src4,
                                         const Mat& src5, Mat& dst)
{

    CV_Assert( src1.size()==src2.size());
    CV_Assert( src1.size()==src3.size());
    CV_Assert( src1.size()==src4.size());

    CV_Assert( src1.rows == dst.rows);
    CV_Assert(dst.cols == (src1.cols*3));
    CV_Assert(dst.type() == CV_32FC1);

    CV_Assert(src5.isContinuous());

    const float* hptr = src5.ptr<float>(0);

    const float h0 = hptr[0];//cos(theta)
    const float h1 = hptr[3];//sin(theta)

    const int w = src1.cols;

    //create -sin(theta)*X -cos(theta)*Y for all points as a block -> hatX
    Mat hatX = -(src3*h1) - (src4*h0);

    //create cos(theta)*X -sin(theta)*Y for all points as a block -> hatY
    Mat hatY = (src3*h0) - (src4*h1);


    //compute Jacobian blocks (3 blocks)
    dst.colRange(0, w) = (src1.mul(hatX))+(src2.mul(hatY));//1

    src1.copyTo(dst.colRange(w, 2*w));//2
    src2.copyTo(dst.colRange(2*w, 3*w));//3
}
Example #6
//d(a, b) from the formula: sum of squared point-to-point distances
double d(Point2i i, Point2i xi, Mat line, bool which){
	Mat m1 = line.colRange(Range(i.x - winr, i.x + winr)).rowRange(Range(i.y - winr, i.y + winr));
	Mat m2 = line.colRange(Range(xi.x - winr, xi.x + winr)).rowRange(Range(xi.y - winr, xi.y + winr));
	double dist = 0;
	vector<Point2i> ml1, ml2;
	for (int y = 0; y < m1.rows; y++)
		for (int x = 0; x < m1.cols; x++){
			if (m1.at<uchar>(y, x)){
				ml1.push_back(Point2i(x, y));
			}
		}
	for (int y = 0; y < m2.rows; y++)
		for (int x = 0; x < m2.cols; x++){
			if (m2.at<uchar>(y, x)){
				ml2.push_back(Point2i(x, y));
			}
		}
	for (size_t l1 = 0; l1 < ml1.size(); l1++){
		double min = 1E20;
		for (size_t l2 = 0; l2 < ml2.size(); l2++){
			Point2i vec = ml1[l1] - ml2[l2];
			double dd = (vec.x*vec.x + vec.y*vec.y);
			if (dd < min) min = dd;
		}
		dist += min;
	}
	if (which) return dist / ml2.size();
	else return dist / ml1.size();
}
void RGBLineChartThread::run()
{
	Mat frame;
	Mat b_hist,g_hist,r_hist;
	std::vector<Mat> bgr_planes;
	
	int histSize = 256;
	float range[] = {0,256};
	const float* histRange = {range};
	bool uniform = true;
	bool accumulate = false;

	int hist_w = 512;
	int hist_h = 400;
	int bin_w = cvRound((double)hist_w / histSize);
	Mat histImage(hist_h , hist_w , CV_8UC3 , Scalar(0,0,0));
	Mat MergedImage;

	if( !cap.isOpened() )
		return;

	while(true)
	{
		cap >> frame;
		split(frame , bgr_planes);
		histImage = Mat::zeros(hist_h , hist_w , CV_8UC3);

		cv::calcHist(&bgr_planes[0],1,0,Mat(),b_hist,1,&histSize,&histRange,uniform,accumulate);
		cv::calcHist(&bgr_planes[1],1,0,Mat(),g_hist,1,&histSize,&histRange,uniform,accumulate);
		cv::calcHist(&bgr_planes[2],1,0,Mat(),r_hist,1,&histSize,&histRange,uniform,accumulate);

		cv::normalize(b_hist,b_hist,0,histImage.rows,NORM_MINMAX,-1,Mat());
		cv::normalize(g_hist,g_hist,0,histImage.rows,NORM_MINMAX,-1,Mat());
		cv::normalize(r_hist,r_hist,0,histImage.rows,NORM_MINMAX,-1,Mat());

		for(int i=1; i<histSize; i++)
		{
			cv::line(histImage,Point(bin_w*(i-1) , hist_h - cvRound(b_hist.at<float>(i - 1)) ),Point(bin_w*i , hist_h - cvRound(b_hist.at<float>(i)) ),Scalar(255,0,0),2,8,0);
			cv::line(histImage,Point(bin_w*(i-1) , hist_h - cvRound(g_hist.at<float>(i - 1)) ),Point(bin_w*i , hist_h - cvRound(g_hist.at<float>(i))),Scalar(0,255,0),2,8,0);
			cv::line(histImage,Point(bin_w*(i-1) , hist_h - cvRound(r_hist.at<float>(i - 1)) ),Point(bin_w*i , hist_h - cvRound(r_hist.at<float>(i))),Scalar(0,0,255),2,8,0);
		}
		merge(bgr_planes,frame);

		resize(frame,frame,cv::Size(histImage.cols,histImage.rows));
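		// Stitch the histogram and the resized frame side by side: colRange returns
		// writable views into MergedImage, so copyTo fills it in place (cv::hconcat
		// would achieve the same result).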
		int totalCols = histImage.cols + frame.cols;
		MergedImage.create(histImage.rows, totalCols,histImage.type());
		Mat subImage = MergedImage.colRange(0,histImage.cols);
		histImage.copyTo(subImage);
		subImage = MergedImage.colRange(histImage.cols , totalCols);
		frame.copyTo(subImage);

		emit NewLineChart(&MergedImage);

		bgr_planes.clear();
		waitKey(30);
	}
	return;
}
void rect_accumulate_rect(CvRect r)
{
	int x = r.x - area_x;
	int y = r.y - area_y;
	if (area_roi.dims > 0)
		area.colRange(x,x+r.width).rowRange(y,y+r.height) |= area_roi.colRange(x,x+r.width).rowRange(y,y+r.height);
	else
		area.colRange(x,x+r.width).rowRange(y,y+r.height) = 1;
}
Example #9
 void getGradient(Mat src, Mat& dst, int pos)
 {
     dst = Mat::zeros(src.size(), CV_32F);
     Mat a, b;
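     // Column-wise forward difference; pos selects whether the result is written
     // left-aligned (pos == 0) or right-aligned (pos == 1) into dst.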
     Mat grad = src.colRange(1, src.cols) - src.colRange(0, src.cols - 1);
     grad.copyTo(dst.colRange(pos, src.cols + pos - 1));
     if(pos == 1) {
         src.col(0).copyTo(dst.col(0));
     }
 }
Example #10
void assembleArts()
{
	Mat edges;
		
	Canny(grabbedImage,edges,50,150);
	
	int x0,y0,x1,y1;

	if (startRect.x>endRect.x)
	{
		x0=endRect.x;
		x1=startRect.x;
	}
	else
	{
		x1=endRect.x;
		x0=startRect.x;
	}

	if (startRect.y>endRect.y)
	{
		y0=endRect.y;
		y1=startRect.y;
	}
	else
	{
		y1=endRect.y;
		y0=startRect.y;
	}

	finalImage = Mat(Size((x1-x0)*3,y1-y0),CV_8UC3,Scalar(132,254,255));
	Mat blurMan = finalImage.colRange(0,x1-x0);
	Mat blackMan = finalImage.colRange(x1-x0,2*(x1-x0));
	Mat greenMan = finalImage.colRange(2*(x1-x0),3*(x1-x0));
	

	Mat edgeMask=edges(Rect(x0,y0,x1-x0,y1-y0)).clone();
	edgeMask = (edgeMask == 255);
	blackMan.setTo(Scalar(0,0,0),edgeMask);
	
	Mat blur = blurMan.clone();
	blur.setTo(Scalar(0,100,0),edgeMask);
	addWeighted(blurMan,0.85,blur,0.15,0.0,blurMan);
	GaussianBlur(blurMan,blurMan,Size(3,7),2,6);
	
	Mat edgeMaskHalfFull = edgeMask.clone();
	Mat edgeMaskToClear = edgeMaskHalfFull.rowRange(0,(int)(0.3*(y1-y0)));
	edgeMaskToClear.setTo(Scalar(0));
	greenMan.setTo(Scalar(0,255,0),edgeMaskHalfFull);
	

	
	imshow("finalImage",finalImage);
}
Example #11
static void LieToP(InputArray Lie, OutputArray _P){
    Mat p = Lie.getMat();
    _P.create(4,4,p.type());
    Mat P = _P.getMat();
    if(p.cols==1){
        p = p.t();
    } 
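    // p is a 6-element Lie parameter vector: elements 0-2 form the Rodrigues
    // rotation vector, elements 3-5 the translation.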
    
    Mat R=rodrigues(p.colRange(Range(0,3)));
    Mat T=p.colRange(Range(3,6)).t();
    hconcat(R,T,P);
    make4x4(P).copyTo(_P);
}
void eyelidsDistances(Mat eyes, int * outputDL, int * outputDR, int leftMaximumOpened, int rightMaximumOpened)
{
	Mat eyesGray;
	cvtColor(eyes, eyesGray, CV_BGR2GRAY);
	int m = eyesGray.rows;
	int n = eyesGray.cols;

	Mat leftEye = eyesGray.colRange(0, n / 2 - 11);
	Mat rightEye = eyesGray.colRange(n / 2 + 10, n);

	*outputDL = eyelidDistance(leftEye, leftMaximumOpened * 0.4);
	*outputDR = eyelidDistance(rightEye, rightMaximumOpened * 0.4);
}
Example #13
void normalValidation( DataSet& data, TrainResult& result)
{
	//these vars not needed - use empty Mat
	Mat varIdx, missingDataMask;
	
	
	
	Mat sampleIdx;

	result.train_hr = 0;
	result.test_hr = 0;
	result.fpRate = 0;
	result.fnRate = 0;
	
	//	printf( "numSamples %d", data.numSamples);
	
	//CvBoostTree boost;
	
	//define test and trainingsset
	float partTrain = 1.0/8.0;
	sampleIdx = Mat(1,data.numSamples,CV_8U,1.0);
	
	int negIdx = (int)floor(partTrain*data.numNeg);
	sampleIdx.colRange(negIdx*5, negIdx*6) = 0.0;
	
	
	int posIdx = (int)floor( partTrain*data.numPos );
	sampleIdx.colRange( data.numNeg+posIdx*5, data.numNeg + posIdx*6) = 0.0;
	
	//int numT = (cv::sum( sampleIdx ))[0];
	//printf("sample Idx sum (trainsamples): %d\n",numT);
	
	int numTestSamples = negIdx + posIdx;
	printf("numSamples: %d -- numTrainSamples: %d -- numTestSamples: %d\n",data.numSamples, data.numSamples-numTestSamples, numTestSamples );
	
	
	//training
	forest.train(data.data, CV_ROW_SAMPLE, data.responses, varIdx, sampleIdx, data.varType, missingDataMask, forestParams);
	
	//booster.train(data.data, CV_ROW_SAMPLE, data.responses, varIdx, sampleIdx, data.varType, missingDataMask, boostParams);
	
	//evaluation
	evaluation(forest, data, sampleIdx, result);
	
	
	double sum = (cv::sum(result.var_importance))[0];
	result.var_importance /= sum;
	
	printf( "____\nRecognition rate: train = %.2f%%, test = %.2f%% -- overall FN = %.2f%%, FP = %.2f%%\n",
		   result.train_hr*100., result.test_hr*100. ,result.fnRate*100. ,result.fpRate*100.);
}
Example #14
static void LieToRT(InputArray Lie, OutputArray _R, OutputArray _T){
    Mat p = Lie.getMat();
    _R.create(3,3,CV_64FC1);
    Mat R = _R.getMat();
    _T.create(3,1,CV_64FC1);
    Mat T = _T.getMat();
    if(p.cols==1){
        p = p.t();
    }
        
    rodrigues(p.colRange(Range(0,3))).copyTo(R);
    Mat(p.colRange(Range(3,6)).t()).copyTo(T);


}
Example #15
File: ecc.cpp Project: 4auka/opencv
static void image_jacobian_translation_ECC(const Mat& src1, const Mat& src2, Mat& dst)
{

    CV_Assert( src1.size()==src2.size());

    CV_Assert( src1.rows == dst.rows);
    CV_Assert(dst.cols == (src1.cols*2));
    CV_Assert(dst.type() == CV_32FC1);

    const int w = src1.cols;

    //compute Jacobian blocks (2 blocks)
    src1.copyTo(dst.colRange(0, w));
    src2.copyTo(dst.colRange(w, 2*w));
}
Example #16
void convolve_matrix_region( Mat& src_mat, const int start_row, const int end_row, const int start_col, const int end_col, Mat& dst_mat, Mat& kernel )
{   
   int size_kernel = kernel.rows;
   int size_kernel_div_2 = size_kernel / 2;
   int adjusted_start_row = start_row;
   int adjusted_end_row = end_row;
   int adjusted_start_col = start_col;
   int adjusted_end_col = end_col;
   
   if( adjusted_start_row < 0)
      adjusted_start_row = 0;
      
   if( adjusted_start_col < 0)
      adjusted_start_col = 0;
      
   if( adjusted_end_row > src_mat.rows)
      adjusted_end_row = src_mat.rows;
      
   if( adjusted_end_col > src_mat.cols)
      adjusted_end_col = src_mat.cols;   
   
   Mat sub_mat = src_mat.colRange(adjusted_start_col, adjusted_end_col).rowRange(adjusted_start_row, adjusted_end_row);
   dst_mat = sub_mat;
   //filter2D( sub_mat, sub_mat, -1, kernel );
   //dst_mat = sub_mat;
   //if( size_kernel_div_2 + ( end_col - start_col) 
   
   //dst_mat = sub_mat.colRange( size_kernel_div_2, size_kernel_div_2 + ( end_col - start_col) ).rowRange( size_kernel_div_2, size_kernel_div_2 + ( end_row - start_row) );
}
Example #17
/** @function main */
int main( )
{
    VideoCapture cap("./write3.mp4"); // open the default camera
    if(!cap.isOpened())  // check if we succeeded
        return -1;
//    time_t start,end;
    for(k = 0;;k++)
    {
        auto t1 = chrono::high_resolution_clock::now();
        cap >> src;
//                src=src.colRange(120,600);
        if(src.empty())
            continue;
        src=src.colRange(180,540);
//        src=src.rowRange(100,300);

        cvtColor( src, src_gray, CV_BGR2GRAY );
//          blur( src_gray, src_gray, Size(3,3) );


        /// Create Window
        if(src.size().height==0||src.size().width==0)
            continue;
        regionSelect();
        auto t2 = std::chrono::high_resolution_clock::now();
//          cout<<" Time taken -"<<chrono::duration_cast<chrono::milliseconds>(t2-t1).count()<<endl;
//          waitKey();
    }

//  waitKey(0);
  return(0);
}
Example #18
Mat Encoder::getNthBlock(int n, Mat gob){ // this is 0 indexed
	assert(n >= 0 && n < Options::M * Options::M);
	int rowStart = int(n / Options::M) * Options::blockSize;
	int colStart = int(n % Options::M) * Options::blockSize;
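	// (rowStart, colStart) is the top-left corner of block n, counting blocks
	// row by row across the M x M grid of the GOB.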
//	 std::cout << rowStart << ", " << colStart << "\n";
	return gob.colRange(colStart, colStart+Options::blockSize).rowRange(rowStart, rowStart + Options::blockSize).clone().reshape(1, Options::blockSize * Options::blockSize);
}
void GaussSeidelSolver::computeDifferentialAdjust(Mat xDifferential, Mat yDifferential, Mat& adjust) {
	int numRow = xDifferential.rows;
	int numCol = xDifferential.cols;
	adjust = Mat::zeros(numRow, numCol, CV_32FC1);
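	// Note: cv::Mat arguments are shallow copies, so the in-place subtractions on the
	// row/col views below also turn the caller's xDifferential/yDifferential into
	// forward differences.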

	Mat yTail = yDifferential.rowRange(1, numRow);
	Mat yHead = yDifferential.rowRange(0, numRow-1);
	yTail -= yHead.clone();

	Mat xTail = xDifferential.colRange(1, numCol);
	Mat xHead = xDifferential.colRange(0, numCol-1);
	xTail -= xHead.clone();

	adjust += xDifferential;
	adjust += yDifferential;
	adjust /= 4.0;
}
void CV_ArucoDetectionMarkerSize::run(int) {

    Ptr<aruco::Dictionary> dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
    int markerSide = 20;
    int imageSize = 200;

    // 10 cases
    for(int i = 0; i < 10; i++) {
        Mat marker;
        int id = 10 + i * 20;

        // create synthetic image
        Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255));
        aruco::drawMarker(dictionary, id, markerSide, marker);
        Mat aux = img.colRange(30, 30 + markerSide).rowRange(50, 50 + markerSide);
        marker.copyTo(aux);

        vector< vector< Point2f > > corners;
        vector< int > ids;
        aruco::DetectorParameters params;

        // set an invalid minMarkerPerimeterRate
        params.minMarkerPerimeterRate = min(4., (4. * markerSide) / float(imageSize) + 0.1);
        aruco::detectMarkers(img, dictionary, corners, ids, params);
        if(corners.size() != 0) {
            ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate");
            ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
            return;
        }

        // set a valid minMarkerPerimeterRate
        params.minMarkerPerimeterRate = max(0., (4. * markerSide) / float(imageSize) - 0.1);
        aruco::detectMarkers(img, dictionary, corners, ids, params);
        if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) {
            ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate");
            ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
            return;
        }

        // set an invalid maxMarkerPerimeterRate
        params.maxMarkerPerimeterRate = min(4., (4. * markerSide) / float(imageSize) - 0.1);
        aruco::detectMarkers(img, dictionary, corners, ids, params);
        if(corners.size() != 0) {
            ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate");
            ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
            return;
        }

        // set a valid maxMarkerPerimeterRate
        params.maxMarkerPerimeterRate = max(0., (4. * markerSide) / float(imageSize) + 0.1);
        aruco::detectMarkers(img, dictionary, corners, ids, params);
        if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) {
            ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate");
            ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
            return;
        }
    }
}
/* Creates a matrix header for the specified column span. */
int ns__colRange(  struct soap *soap, 
			std::string InputMatFilename,
			int startCol=0, int endCol=0, 
			std::string &OutputMatFilename=ERROR_FILENAME)
{
	bool timeChecking, memoryChecking;
	getConfig(timeChecking, memoryChecking);
	if(timeChecking){
		start = omp_get_wtime();
	}

    /* read from bin */
    Mat src;
	Mat dst;
	if(!readMat(InputMatFilename, src))
    {
		Log(logERROR) << "colRange:: can not read bin file for src" << std::endl;
        return soap_receiver_fault(soap, "colRange :: can not read bin file for src", NULL);
    }
    
    try{    
        dst = src.colRange(startCol, endCol);
    } catch( cv::Exception& e ) {
        Log(logERROR) << e.what() << std::endl;
        return soap_receiver_fault(soap, e.what(), NULL);
    }

	std::string toAppend = "_colRange";
    getOutputFilename(OutputMatFilename, toAppend);
    if(!saveMat(OutputMatFilename, dst))
    {
        Log(logERROR) << "colRange :: can not save mat to binary file" << std::endl;
        return soap_receiver_fault(soap, "colRange :: can not save mat to binary file", NULL);
    }

    src.release();
	dst.release();

	if(timeChecking) 
	{ 
		end = omp_get_wtime();
		Log(logINFO) << "colRange :: " << "time elapsed " << end-start << std::endl;
	}
	
	if(memoryChecking)
	{	
		double vm, rss;
		getMemoryUsage(vm, rss);
		Log(logINFO)<< "colRange :: VM usage :" << vm << std::endl 
					<< "Resident set size :" << rss << std::endl;
	}

    return SOAP_OK;
}
Example #22
double mycalibrateCamera( InputArrayOfArrays _objectPoints,
                            InputArrayOfArrays _imagePoints,
                            Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
                            OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria )
{
    int rtype = CV_64F;
    Mat cameraMatrix = _cameraMatrix.getMat();
    cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
    Mat distCoeffs = _distCoeffs.getMat();
    distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
    if( !(flags & CALIB_RATIONAL_MODEL) )
        distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);

    int i;
    size_t nimages = _objectPoints.total();
    CV_Assert( nimages > 0 );
    Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
    collectCalibrationData( _objectPoints, _imagePoints, noArray(),
                            objPt, imgPt, 0, npoints );
    CvMat c_objPt = objPt, c_imgPt = imgPt, c_npoints = npoints;
    CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
    CvMat c_rvecM = rvecM, c_tvecM = tvecM;

    double reprojErr = cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
                                          &c_cameraMatrix, &c_distCoeffs, &c_rvecM,
                                          &c_tvecM, flags, criteria );

    bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed();

    if( rvecs_needed )
        _rvecs.create((int)nimages, 1, CV_64FC3);
    if( tvecs_needed )
        _tvecs.create((int)nimages, 1, CV_64FC3);

    for( i = 0; i < (int)nimages; i++ )
    {
        if( rvecs_needed )
        {
            _rvecs.create(3, 1, CV_64F, i, true);
            Mat rv = _rvecs.getMat(i);
            memcpy(rv.data, rvecM.ptr<double>(i), 3*sizeof(double));
        }
        if( tvecs_needed )
        {
            _tvecs.create(3, 1, CV_64F, i, true);
            Mat tv = _tvecs.getMat(i);
            memcpy(tv.data, tvecM.ptr<double>(i), 3*sizeof(double));
        }
    }
    cameraMatrix.copyTo(_cameraMatrix);
    distCoeffs.copyTo(_distCoeffs);

    return reprojErr;
}
Example #23
void extractFeatures(Mat image, Mat& featureVector)
{
    /* Features are: the 7 Hu moments + average hue */
    Mat grayImage, hsvImage;
    cvtColor(image, grayImage, CV_BGR2GRAY);
    cvtColor(image, hsvImage, CV_BGR2HSV);
    float meanHue = mean(hsvImage)[0];
    Moments m = moments(grayImage);
    double hu[7];
    HuMoments(m, hu);
    for (int i = 0; i < 7; i++)                     // 7 Hu moments into columns 0-6
        featureVector.at<float>(0, i) = (float)hu[i];
    featureVector.at<float>(0, 7) = meanHue / 360;  // average hue into column 7
}
Example #24
File: ecc.cpp Project: 4auka/opencv
static void project_onto_jacobian_ECC(const Mat& src1, const Mat& src2, Mat& dst)
{
    /* this function is used for two types of projections. If src1.cols == src2.cols
    it does a blockwise multiplication (like in the outer product of vectors)
    of the blocks in matrices src1 and src2 and dst
    has size (number_of_blocks x number_of_blocks), otherwise dst is a vector of size
    (number_of_blocks x 1) since src2 is "multiplied"(dot) with each block of src1.

    The number_of_blocks is equal to the number of parameters we are looking for
    (i.e. translation: 2, euclidean: 3, affine: 6, homography: 8)

    */
    CV_Assert(src1.rows == src2.rows);
    CV_Assert((src1.cols % src2.cols) == 0);
    int w;

    float* dstPtr = dst.ptr<float>(0);

    if (src1.cols !=src2.cols){//dst.cols==1
        w  = src2.cols;
        for (int i=0; i<dst.rows; i++){
            dstPtr[i] = (float) src2.dot(src1.colRange(i*w,(i+1)*w));
        }
    }

    else {
        CV_Assert(dst.cols == dst.rows); //dst is square (and symmetric)
        w = src2.cols/dst.cols;
        Mat mat;
        for (int i=0; i<dst.rows; i++){

            mat = Mat(src1.colRange(i*w, (i+1)*w));
            dstPtr[i*(dst.rows+1)] = (float) pow(norm(mat),2); //diagonal elements

            for (int j=i+1; j<dst.cols; j++){ //j starts from i+1
                dstPtr[i*dst.cols+j] = (float) mat.dot(src2.colRange(j*w, (j+1)*w));
                dstPtr[j*dst.cols+i] = dstPtr[i*dst.cols+j]; //due to symmetry
            }
        }
    }
}
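A hedged usage sketch (my own addition, not part of ecc.cpp; the demo function name is hypothetical and it assumes the static helper is visible in the same translation unit) showing the two dst shapes the assertions above allow, using a synthetic Jacobian with 2 parameter blocks:

static void project_onto_jacobian_demo()
{
    const int h = 3, w = 4, nblocks = 2;
    Mat jac(h, nblocks * w, CV_32FC1);            // Jacobian: 2 blocks of width w
    Mat err(h, w, CV_32FC1);                      // error image, same size as one block
    randu(jac, Scalar::all(0), Scalar::all(1));
    randu(err, Scalar::all(0), Scalar::all(1));

    Mat hessian(nblocks, nblocks, CV_32FC1);      // src1.cols == src2.cols -> Gram (Hessian) matrix
    project_onto_jacobian_ECC(jac, jac, hessian);

    Mat proj(nblocks, 1, CV_32FC1);               // src1.cols != src2.cols -> per-block dot products
    project_onto_jacobian_ECC(jac, err, proj);
}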
Example #25
Mat paramsToProjection(const Mat & p,const Mat& _cameraMatrix){
    //Build the base transform
    assert(p.type()==CV_64FC1);
    Mat dR=rodrigues(p.colRange(Range(0,3)));
    Mat dT=p.colRange(Range(3,6)).t();
    Mat dA;
    hconcat(dR,dT,dA);
    dA=make4x4(dA);
    Mat cameraMatrix=make4x4(_cameraMatrix);
    assert(cameraMatrix.type()==CV_64FC1);
    Mat proj=cameraMatrix*dA*cameraMatrix.inv();
//     cout<<"p: "<<"\n"<< p<< endl;
//     cout<<"Proj: "<<"\n"<< proj<< endl;
    //The column swap
    Mat tmp=proj.colRange(2,4).clone();
    tmp.col(1).copyTo(proj.col(2));
    tmp.col(0).copyTo(proj.col(3));
    //The row drop
    proj=proj.rowRange(0,3);
    return proj;
}
void GaussSeidelSolver::scaleDownDifferentialAndHeight(
		Mat xDifferential,
		Mat yDifferential,
		Mat heightMat,
		Mat& xDiffSmall,
		Mat& yDiffSmall,
		Mat& heightMatSmall) {

	Size smallSize;
	smallSize.height = heightMat.rows / 2;
	smallSize.width = heightMat.cols / 2;

	resize(xDifferential, xDiffSmall, smallSize);
	resize(yDifferential, yDiffSmall, smallSize);
	resize(heightMat, heightMatSmall, smallSize);

	Mat coarseXTail = xDiffSmall.colRange(1, smallSize.width);
	coarseXTail += xDiffSmall.colRange(0, smallSize.width-1).clone();

	Mat coarseYTail = yDiffSmall.rowRange(1, smallSize.height);
	coarseYTail += yDiffSmall.rowRange(0, smallSize.height-1).clone();
}
Example #27
void Features::getROIDescription(Mat & featureDescription,
                                 const SourcesMap & sources,
                                 const vector<Rect> & roi)
{
    featureDescription.create(roi.size(), getTotalFeatureVectorLength(), CV_32F);
    int k = 0;
    for (size_t i = 0; i < featuresSet.size(); ++i)
    {
        int descriptionLength = featuresSet[i]->getFeatureVectorLength();
        Mat currentFeatureDescription = featureDescription.colRange(k, k + descriptionLength);
        featuresSet[i]->getROIDescription(currentFeatureDescription, sources, roi);
        k += descriptionLength;
    }
}
Example #28
static inline void solveJacobian(Mat& dMdp,
                          Mat& G,
                          Mat& J,
                          Mat& err,
                          Mat& p,
                          int numParams)
{
    int c=dMdp.cols;
    int nb=c/BSZ;
    Mat H(numParams,numParams,CV_64FC1);
    H=0.0;
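    // Accumulate the Hessian block by block (each parameter block spans BSZ columns);
    // JacobianCore also fills the corresponding columns of J.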
    for (int i=0;i<nb;i++){
        int offset = i*BSZ;
        Mat _dMdp=dMdp.colRange(Range(offset,offset+BSZ));
        Mat _G=G.colRange(Range(offset,offset+BSZ));
        Mat _J=J.colRange(Range(offset,offset+BSZ));
        JacobianCore(_dMdp, _G, _J, H, numParams);
    }
    cout<<"Summed Hessian: "<<H<<endl;
    cout<<"Recalculated Hessian: "<<J*J.t()<<endl;
    // Now J has been filled out and H is complete
    p+=H.inv()*(J*err);
}
Example #29
void Objectness::trainStageI()
{
    vecM pX, nX;
    pX.reserve(200000), nX.reserve(200000);
    Mat xP1f, xN1f;
    CV_Assert(matRead(_modelName + ".xP", xP1f) && matRead(_modelName + ".xN", xN1f));
    for (int r = 0; r < xP1f.rows; r++)
        pX.push_back(xP1f.row(r));
    for (int r = 0; r < xN1f.rows; r++)
        nX.push_back(xN1f.row(r));
    Mat crntW = trainSVM(pX, nX, L1R_L2LOSS_SVC, 10, 1);
    crntW = crntW.colRange(0, crntW.cols - 1).reshape(1, _W);
    CV_Assert(crntW.size() == Size(_W, _W));
    matWrite(_modelName + ".wS1", crntW);
}
TEST_P(Reproducibility_MobileNet_SSD, Accuracy)
{
    const string proto = findDataFile("dnn/MobileNetSSD_deploy.prototxt", false);
    const string model = findDataFile("dnn/MobileNetSSD_deploy.caffemodel", false);
    Net net = readNetFromCaffe(proto, model);
    int targetId = GetParam();
    const float l1 = (targetId == DNN_TARGET_OPENCL_FP16) ? 1.5e-4 : 1e-5;
    const float lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-4;

    net.setPreferableTarget(targetId);

    Mat sample = imread(_tf("street.png"));

    Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
    net.setInput(inp);
    Mat out = net.forward();

    const float scores_diff = (targetId == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-5;
    const float boxes_iou_diff = (targetId == DNN_TARGET_OPENCL_FP16) ? 5e-3 : 1e-4;
    Mat ref = blobFromNPY(_tf("mobilenet_ssd_caffe_out.npy"));
    normAssertDetections(ref, out, "", 0.0, scores_diff, boxes_iou_diff);

    // Check that detections aren't preserved.
    inp.setTo(0.0f);
    net.setInput(inp);
    out = net.forward();
    out = out.reshape(1, out.total() / 7);

    const int numDetections = out.rows;
    ASSERT_NE(numDetections, 0);
    for (int i = 0; i < numDetections; ++i)
    {
        float confidence = out.ptr<float>(i)[2];
        ASSERT_EQ(confidence, 0);
    }

    // Check batching mode.
    ref = ref.reshape(1, numDetections);
    inp = blobFromImages(std::vector<Mat>(2, sample), 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
    net.setInput(inp);
    Mat outBatch = net.forward();

    // Output blob has a shape 1x1x2Nx7 where N is the number of detections for
    // a single sample in the batch. The first element of each detection vector is the batch id.
    outBatch = outBatch.reshape(1, outBatch.total() / 7);
    EXPECT_EQ(outBatch.rows, 2 * numDetections);
    normAssert(outBatch.rowRange(0, numDetections), ref, "", l1, lInf);
    normAssert(outBatch.rowRange(numDetections, 2 * numDetections).colRange(1, 7), ref.colRange(1, 7),
               "", l1, lInf);
}