Example 1
void imgproc(const uint8_t *image, int width, int height)
{
  cv::Mat img(height, width, CV_8UC1, const_cast<uint8_t*>(image), width);
  imshow("Original", img);
  cv::waitKey(1);

  cv::Mat src = img.clone();
  cv::Mat color_src(height, width, CV_8UC3);
  cvtColor(src, color_src, CV_GRAY2RGB);

  // Image processing starts here
  GaussianBlur(src, src, cv::Size(3,3), 0);
  adaptiveThreshold(src, src, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 5, 3);
  //equalizeHist(src, src);
  // TODO: Can think about using multiple thresholds and choosing one where
  // we can detect a pattern
  //threshold(src, src, 100, 255, cv::THRESH_BINARY_INV);

  imshow("Thresholded", src);

  std::vector<std::vector<cv::Point> > contours;

  findContours(src, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
  //printf("Num contours: %lu\n", contours.size());

  std::vector<double> contour_area, contour_arclength;
  contour_area.resize(contours.size());
  contour_arclength.resize(contours.size());
  std::vector<unsigned int> circle_index;
  for(unsigned int idx = 0; idx < contours.size(); idx++)
  {
    if(contours[idx].size() > 25)
    {
      cv::Mat contour(contours[idx]);
      contour_area[idx] = contourArea(contour);
      if(contour_area[idx] > 50)
      {
        contour_arclength[idx] = arcLength(contour,true);
        float q = 4*M_PI*contour_area[idx] /
            (contour_arclength[idx]*contour_arclength[idx]);
        if(q > 0.8f)
        {
          circle_index.push_back(idx);
          //printf("isoperimetric quotient: %f\n", q);
          //Scalar color( rand()&255, rand()&255, rand()&255 );
          //drawContours(contours_dark, contours, idx, color, 1, 8);
        }
      }
    }
  }
  std::list<Circle> circles;
  for(unsigned int i = 0; i < circle_index.size(); i++)
  {
    Circle c;
    cv::Moments moment = moments(contours[circle_index[i]]);
    float inv_m00 = 1./moment.m00;
    c.center = cv::Point2f(moment.m10*inv_m00, moment.m01*inv_m00);
    c.radius = (sqrtf(contour_area[circle_index[i]]/M_PI) + contour_arclength[circle_index[i]]/(2*M_PI))/2.0f;
    circles.push_back(c);
  }

  // Get the circles with centers close to each other
  std::vector<std::list<Circle> > filtered_circles;
  std::list<Circle>::iterator it = circles.begin();
  unsigned int max_length = 0;
  while(it != circles.end())
  {
    std::list<Circle> c;
    c.push_back(*it);

    cv::Point c1 = it->center;

    std::list<Circle>::iterator it2 = it;
    it2++;
    while(it2 != circles.end())
    {
      cv::Point c2 = it2->center;
      std::list<Circle>::iterator it3 = it2;
      it2++;
      if(hypotf(c2.x - c1.x, c2.y - c1.y) < 10)
      {
        c.push_back(*it3);
        circles.erase(it3);
      }
    }
    unsigned int length_c = c.size();
    if(length_c > 1 && length_c > max_length)
    {
      max_length = length_c;
      filtered_circles.push_back(c);
    }

    it2 = it;
    it++;
    circles.erase(it2);
  }

  if(filtered_circles.size() > 0)
  {
    Circle target_circle;
    target_circle.radius = std::numeric_limits<float>::max();

    for(it = filtered_circles.back().begin(); it != filtered_circles.back().end(); it++)
    {
      //printf("circle: c: %f, %f, r: %f\n", it->center.x, it->center.y, it->radius);
      if(it->radius < target_circle.radius)
      {
        target_circle.radius = it->radius;
        target_circle.center = it->center;
      }
    }
    circle(color_src, cv::Point(target_circle.center.x, target_circle.center.y), target_circle.radius, cv::Scalar(0,0,255), 2);
    printf("target: c: %f, %f, r: %f\n", target_circle.center.x, target_circle.center.y, target_circle.radius);

  }
#if defined(CAPTURE_VIDEO)
  static cv::VideoWriter video_writer("output.mp4", CV_FOURCC('M','J','P','G'), 20, cv::Size(width, height));
  video_writer.write(color_src);
#endif
  imshow("Target", color_src);
  cv::waitKey(1);
}
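
A minimal sketch of the circularity test used above, assuming OpenCV 2.x: the isoperimetric quotient q = 4*pi*A / P^2 equals 1 for a perfect circle and falls toward 0 for elongated shapes, so keeping contours with q close to 1 selects the round ones.

#include <opencv2/opencv.hpp>
#include <vector>

// Return the indices of contours that are large and round enough.
std::vector<int> findCircularContours(const std::vector<std::vector<cv::Point> > &contours,
                                      double minArea = 50.0, double minQ = 0.8)
{
  std::vector<int> indices;
  for (int i = 0; i < (int)contours.size(); i++) {
    double a = cv::contourArea(contours[i]);
    if (a < minArea) continue;               // skip tiny blobs, as above
    double p = cv::arcLength(contours[i], true);
    double q = 4.0 * CV_PI * a / (p * p);    // isoperimetric quotient
    if (q > minQ) indices.push_back(i);
  }
  return indices;
}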
Example 2
void TargetExtractor::regionGrow2(int areaThreshold, int diffThreshold)
{
    Mat gray;
    cvtColor(mFrame, gray, CV_BGR2GRAY);

    Mat temp;
    mMask.copyTo(temp);

    vector<vector<Point> > contours;
    findContours(temp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    int maxStackSize = gray.rows * gray.cols / 4;
    static int direction[8][2] = {
        { 0, 1 }, { 1, 1 }, { 1, 0 }, { 1, -1 },
        { 0, -1 }, { -1, -1 }, { -1, 0 }, { -1, 1 }
    };

    for (int i = 0; i < contours.size(); i++) {
        if (contourArea(contours[i]) < areaThreshold) {
            drawContours(mMask, contours, i, Scalar(0), CV_FILLED);
            continue;
        }

        // TODO: improve the seed selection method
        Moments mu = moments(contours[i], false);
        Point seed(cvRound(mu.m10 / mu.m00), cvRound(mu.m01 / mu.m00));
        if (pointPolygonTest(contours[i], seed, false) < 0) {
            cout << "Seed not in contour!" << endl;
            continue;
        }

        stack<Point> pointStack;
        // Visited mask for this region, kept separate from the contour image
        Mat region = Mat::zeros(mMask.size(), mMask.type());
        region.at<uchar>(seed) = 255;
        pointStack.push(seed);

        uchar seedPixel = gray.at<uchar>(seed);
        Point cur, pop;

        while (!pointStack.empty() && (int)pointStack.size() < maxStackSize) {

            pop = pointStack.top();
            pointStack.pop();

            for (int k = 0; k < 8; k++) {
                cur.x = pop.x + direction[k][0];
                cur.y = pop.y + direction[k][1];

                if (cur.x < 0 || cur.x > gray.cols - 1 || cur.y < 0 || cur.y > gray.rows - 1) {
                    continue;
                }

                if (region.at<uchar>(cur) != 255) {
                    uchar curPixel = gray.at<uchar>(cur);

                    if (abs(curPixel - seedPixel) < diffThreshold) {
                        region.at<uchar>(cur) = 255;
                        pointStack.push(cur);
                    }
                }
            }
        }
        if (pointStack.empty()) {
            bitwise_or(mMask, region, mMask);
        }
    }
}
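
The core of regionGrow2 is a stack-based, 8-connected flood fill with an intensity tolerance around the seed pixel. A standalone sketch of just that step (illustrative names, no stack-size cap):

#include <opencv2/opencv.hpp>
#include <stack>
#include <cstdlib>

cv::Mat growRegion(const cv::Mat &gray, cv::Point seed, int diffThreshold)
{
    static const int direction[8][2] = {
        { 0, 1 }, { 1, 1 }, { 1, 0 }, { 1, -1 },
        { 0, -1 }, { -1, -1 }, { -1, 0 }, { -1, 1 }
    };
    cv::Mat region = cv::Mat::zeros(gray.size(), CV_8UC1);
    uchar seedPixel = gray.at<uchar>(seed);
    std::stack<cv::Point> pointStack;
    region.at<uchar>(seed) = 255;      // mark the seed as visited
    pointStack.push(seed);
    while (!pointStack.empty()) {
        cv::Point pop = pointStack.top();
        pointStack.pop();
        for (int k = 0; k < 8; k++) {  // visit the 8 neighbours
            cv::Point cur(pop.x + direction[k][0], pop.y + direction[k][1]);
            if (cur.x < 0 || cur.y < 0 || cur.x >= gray.cols || cur.y >= gray.rows)
                continue;
            if (region.at<uchar>(cur) != 255 &&
                std::abs(gray.at<uchar>(cur) - seedPixel) < diffThreshold) {
                region.at<uchar>(cur) = 255;
                pointStack.push(cur);
            }
        }
    }
    return region;
}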
Example 3
void binfileread(char * binfile)
{

	ifstream binarray(binfile, ios::in|ios::binary|ios::ate);
	
	if(binarray.is_open())
	{
		int filesize = binarray.tellg();
		int numberofvalues = filesize / sizeof(double);
		cout << "Binary File size is: " << filesize << endl;
		cout << "The number of values from the binary file is: " << numberofvalues << endl;
		
		dmemblock = new double [numberofvalues];	// numberofvalues doubles, not filesize bytes
		dmemallocated = true;
		binarray.seekg (0, ios::beg);
		binarray.read(reinterpret_cast<char *>(dmemblock), filesize);
		
		for(int i = 0; i < numberofvalues; i++)
		{
			cout << "Sample " << i << ": " << dmemblock[i] << endl;
		}
		
		binarray.close();
		cout << endl;
		cout << "The complete binary file content is in memory" << endl;
		cout << endl;

	// For alice.txt, Fs = 8kHz and a 'dit' is about 75ms in duration
	// A 'dah' will be three times longer than a 'dit'
	// Choosing a 10ms window (80 samples) should give enough resolution
	//
	// ... Add your Code here .....
		/* BEGIN OF choosing a 10ms Window
		double dresmemblock[numberofvalues];
		int tmp = 0;
		
		for(int i = 0; i < numberofvalues; i++)
		{
			//cout << "Sample " << i << ": " << dmemblock[i] << endl;
			for(int j = 0; j < 80; j++)
			{
				dresmemblock[i] = dresmemblock[i] + dmemblock[i+j];
				if(j == 79)
				{
				dresmemblock[i] = dresmemblock[i] / 80;
				tmp = i+j;
				i = tmp;
				}
			}
			
			//dresmemblock[i] = dmemblock[i];
			cout << "10ms Window #" << (i + 1)/80 << " "<< dresmemblock[i] << endl;
		}
		END OF choosing a 10ms Window */
		
		/*int tmp = 0, tmp2 = 0;
		
		for(int i = 0; i < numberofvalues; i++)
		{
			if(dmemblock[i] > 0.08998 || dmemblock[i] < -0.08998)
			{
				tmp = tmp + 1;
				//cout << 1;
				if(tmp == 1800)
				{
					cout << "-";
					tmp = 0;
				}
				if(tmp == 600)
				{
					cout << ".";
					tmp = 0;
				}
			}
			else
			{
				tmp2 = tmp2 + 1;
				if(tmp2 == 600)
				{
				}
				if(tmp2 == 1800)
				{
					cout << "/";
					tmp2 = 0;
				}
				if(tmp2 == 4200)
				{
					cout << " / ";
					tmp2 = 0;
				}
				//cout << 0;
			}
			//cout << "Sample " << i << ": " << dmemblock[i] << endl;
		}*/
	//
		cout << endl;
	moments(dmemblock, numberofvalues);

	}
	else
	{
		cout << "Unable to open file";
	}

	if(dmemallocated)
	{
		delete[] dmemblock;	// allocated with new[], so release with delete[]
	}

	
} // End of binarrayread()
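
The commented-out windowing idea can be written without mutating the loop index: average the rectified signal over non-overlapping 10 ms windows (80 samples at Fs = 8 kHz), one value per window. A sketch under those assumptions, with an illustrative helper name:

#include <vector>
#include <cmath>

std::vector<double> windowAverages(const double *samples, int n, int win = 80)
{
	std::vector<double> out;
	for (int start = 0; start + win <= n; start += win)
	{
		double sum = 0.0;
		for (int j = 0; j < win; j++)
			sum += std::fabs(samples[start + j]);	// rectify so tone windows stand out
		out.push_back(sum / win);
	}
	return out;
}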
Example 4
void FindLargest_ProjectionVoxel(int ImageNum, vector<OctVoxel>& Octree, vector<cv::Mat>& Silhouette, Cpixel*** vertexII, CMatrix* ART){

	int thresh = 70;
	int max_thresh = 210;
	RNG rng(12345);

	Mat src_gray;
	Mat drawing;

	double scale(0.7);
	Size ssize;
	CVector M(4);		// Homogeneous world coordinates of a vertex (x, y, z, 1)
	CVector m(4);		// Normalized image coordinates, expressed in homogeneous form
	M[3] = 1.0;
	//8 vertices world coordinates of the voxel (x,y,z)
	CVector3d vertexW[8];

	ofstream fout("larget_boundingbox_contour.txt");

	int Boundingbox_line[12][2] = { { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 0 },
	{ 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 },
	{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 4 } };

	//---------------------------------------------------------------	
	for (auto h(0); h < ImageNum; h++){
		//src_gray = Silhouette[h];         	
		Silhouette[h].copyTo(src_gray);
		cout << "Silhouette_" << h << endl;

		for (auto j(0); j < Octree.size(); j++){

			Octree[j].SetVertexWorld_Rotated(vertexW);
			for (int k = 0; k < 8; ++k){	//8 vertices of the voxel
				M[0] = vertexW[k].x;
				M[1] = vertexW[k].y;
				M[2] = vertexW[k].z;
				m = ART[h] * M;
				vertexII[h][j][k].setPixel_u_v((int)(m[0] / m[2]), (int)(m[1] / m[2]));  // normalize
			}

			//-------------------------------------- bounding box ------------------------
			for (auto k(0); k < 12; k++){
				//Draw 12 lines of the voxel in img.
				Start_point.x = vertexII[h][j][Boundingbox_line[k][0]].getPixel_u();
				Start_point.y = vertexII[h][j][Boundingbox_line[k][0]].getPixel_v();
				PointStart.push_back(Start_point);
				End_point.x = vertexII[h][j][Boundingbox_line[k][1]].getPixel_u();
				End_point.y = vertexII[h][j][Boundingbox_line[k][1]].getPixel_v();
				PointEnd.push_back(End_point);

				//line(src_gray, Start_point, End_point, Scalar(225, 225,255), 2.0, CV_AA);
			}
		}
		

		Mat canny_output;
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;

		double max_contour_area(0.0);
		int largest_contour_index(0);
		
		/// Detect edges using canny
		Canny(src_gray, canny_output, thresh, max_thresh, 3);
		/// Find contours
		//findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
		findContours(canny_output, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point(0, 0));

		/// Draw contours
		drawing = Mat::zeros(canny_output.size(), CV_8UC3);

		for (auto n(0); n < PointEnd.size(); n++){
			line(drawing, PointStart[n], PointEnd[n], Scalar(225, 225, 225), 1.0, 1, 0);
		}

		/// Get the moments
		vector<Moments> mu(contours.size());
		for (int i = 0; i < contours.size(); i++)
		{
			mu[i] = moments(contours[i], false);
			//cout << "# of contour points: " << contours[i].size() << endl;
			for (int j = 0; j < contours[i].size(); j++)
			{
				//cout << "Point(x,y)=" <<i<<" j "<<j<<" "<< contours[i][j] << endl;
			}
		}
		////  Get the mass centers:
		vector<Point2f> mc(contours.size());
		for (int i = 0; i < contours.size(); i++)
		{
			mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
		}
		//// ---------- - Find the convex hull object for each contour
			vector<vector<Point>>hull(contours.size());
		for (int i = 0; i < contours.size(); i++){
			convexHull(Mat(contours[i]), hull[i], false);
		}				
		
		// Calculate the area with the moments 00 and compare with the result of the OpenCV function
		//printf("\t Info: Area and Contour Length \n");

		//cout << "contours.size() " << contours.size() << endl;
		double countour_Area(0.0);
		double arc_Length(0.0);

		for (int i = 0; i < contours.size(); i++)
		{
			countour_Area = (double)contourArea(contours[i]);
			arc_Length = (double)arcLength(contours[i], true);

			//cout << "contourArea [" << i << "] " << ": Moment " << mu[i].m00 
			//	 << " OpenCV " << countour_Area << " arcLength " << arc_Length << endl;		
			//cout << "countour_Area "<< countour_Area << " " << endl;

			if (countour_Area > max_contour_area){
				max_contour_area = countour_Area;
				largest_contour_index = i;
			}

			//------- draw all contour ---------------
			//Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
			//drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
			//circle(drawing, mc[i], 4, color, -1, 8, 0);
			//drawContours(drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point());
			//drawContours(drawing, contours, i, Scalar(255, 255, 255), 0.10, 8, hierarchy, 0, Point());

		}
		//------- draw largest contour ---------------
		Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		drawContours(drawing, contours, largest_contour_index, color, 2, 8, hierarchy, 0, Point());
		//circle(drawing, mc[largest_contour_index], 4, color, -1, 8, 0);		
		//drawContours(drawing, contours, largest_contour_index, Scalar(0, 255, 255), 2, 8, hierarchy, 0, Point());
		//drawContours(drawing, hull, largest_contour_index, color, 2, 8, vector<Vec4i>(), 0, Point());
		//drawContours(drawing, contours, largest_contour_index, Scalar(255, 255, 255), 1, 8, hierarchy, 0, Point());

		fout << max_contour_area << endl;
		cout << "max_contour_area " << max_contour_area << endl;	
		
		//----------------------- Show in a window --------------------------------------
		//resize(drawing, drawing, ssize, INTER_NEAREST);
		namedWindow("Contours", CV_WINDOW_AUTOSIZE);
		imshow("Contours", drawing);

		//output white boundary
		imwrite("../../data2016/input/newzebra/contour_voxel/contour_voxel" + to_string(h) + ".bmp", drawing);

		waitKey(0);
		destroyWindow("silhouette");

		PointStart.clear();
		PointStart.shrink_to_fit();
		PointEnd.clear();
		PointEnd.shrink_to_fit();
	}

	//getchar();
}
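
The projection step above multiplies a 3x4 camera matrix by a homogeneous world point and normalizes by the third component. A minimal sketch of the same operation, using cv::Matx as a stand-in for the CMatrix/CVector types:

#include <opencv2/opencv.hpp>

cv::Point2i projectVertex(const cv::Matx34d &P, const cv::Point3d &X)
{
	cv::Matx41d M(X.x, X.y, X.z, 1.0);           // homogeneous world coordinates
	cv::Matx31d m = P * M;                       // homogeneous image coordinates
	return cv::Point2i((int)(m.val[0] / m.val[2]),  // normalize by third component
	                   (int)(m.val[1] / m.val[2]));
}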
Example 5
void sktinfoextractor::hacerUpdate(){
    if(cam->numContextosIniciados>0){
        if(numFrames==0){
#if defined(_WIN32) || defined(_WIN64)
            GetSystemTime(&inicio);
#else
            gettimeofday(&inicio,NULL);
#endif
        }
        bool* update=new bool[cam->numContextosIniciados];

        //if(cam->updateCam(true,0,true,false)){
        int tid;

        Moments moms;
        int b;
        Point2f pt;
        if(cam->numContextosIniciados>1){
            omp_set_num_threads(2);
#pragma omp parallel private(tid,moms,b,pt) shared(update)
            {
                /// TODO : FIX FOR LESS AVAILABLE THREADS THAN CONTEXTS
                tid = omp_get_thread_num();
                //if(tid!=0){
                //  tid--;
                cameras* cc=cam;
                if(tid<cc->numContextosIniciados){
                    if(cc->updateCam(false,tid,true,false)){
                        cc->retriveDepthAndMask(tid,depths[tid],masks[tid]);
                        cc->retrivePointCloudFast(tid,&(depths[tid]),&(pCs[tid]));
                        if(cConfig[tid]->method=="Background"){
                            depths[tid].convertTo(depthsS[tid],CV_32FC1,1.0/6000);
                            sktP[tid]->processImage(depthsS[tid],toBlobs[tid],masks[tid]);
                        }
                        else
                        {
                            sktP[tid]->processImage(pCs[tid],toBlobs[tid],masks[tid]);
                        }
                        findContours(toBlobs[tid], blobs[tid],CV_RETR_LIST,CV_CHAIN_APPROX_NONE);
                        finBlobs[tid].clear();
                        blobSizes[tid].clear();
                        for(b=0; b<blobs[tid].size(); b++)
                        {
                            moms=moments(blobs[tid][b],true);
                            if(moms.m00>cConfig[tid]->minBlobSize)
                            {
                                //Point2f pt((int)(moms.m10 / moms.m00),(int)(moms.m01 / moms.m00));
                                //circle(toBlobs[k],pt,sqrt((double)moms.m00/3.14),cvScalar(255),-1);
                                //Save blob centers
                                pt.x=(int)(moms.m10 / moms.m00);
                                pt.y=(int)(moms.m01 / moms.m00);
                                finBlobs[tid].push_back(pt);
                                blobSizes[tid].push_back((int)moms.m00);
                            }
                        }
                        update[tid]=true;
                    }
                    else{update[tid]=false;}
                }
                //}
            }
        }
        else{
            if(cam->updateCam(false,0,true,false)){
                cam->retriveDepthAndMask(0,depths[0],masks[0]);
                cam->retrivePointCloudFast(0,&(depths[0]),&(pCs[0]));
                if(cConfig[0]->method=="Background"){
                    depths[0].convertTo(depthsS[0],CV_32FC1,1.0/6000);
                    sktP[0]->processImage(depthsS[0],toBlobs[0],masks[0]);
                }
                else
                {
                    sktP[0]->processImage(pCs[0],toBlobs[0],masks[0]);
                }
                findContours(toBlobs[0], blobs[0],CV_RETR_LIST,CV_CHAIN_APPROX_NONE);
                finBlobs[0].clear();
                blobSizes[0].clear();
                for(b=0; b<blobs[0].size(); b++)
                {
                    moms=moments(blobs[0][b],true);
                    if(moms.m00>cConfig[0]->minBlobSize)
                    {
                        //Point2f pt((int)(moms.m10 / moms.m00),(int)(moms.m01 / moms.m00));
                        //circle(toBlobs[k],pt,sqrt((double)moms.m00/3.14),cvScalar(255),-1);
                        //Save blob centers
                        pt.x=(int)(moms.m10 / moms.m00);
                        pt.y=(int)(moms.m01 / moms.m00);
                        finBlobs[0].push_back(pt);
                        blobSizes[0].push_back((int)moms.m00);
                    }
                }
                update[0]=true;
            }
            else{update[0]=false;}
        }
        bool doUpdate=true;
        for(int i=0;i<cam->numContextosIniciados;i++){
            doUpdate=(doUpdate && update[i]);
        }
        if(doUpdate){
            vector<Point2f> ou=finBlobs[0];
            if(cam->numContextosIniciados>1 && update[1]){
                if(ui->mixActive->isChecked()){
                    //Run through blob centers and get 3d vector
                    //Convert 3d vectors to the first cam
                    int count=0;
                    XnPoint3D* rP=real;
                    Vec3f v;
                    //cout <<"SIZE : " << finBlobs[1].size() << endl;
                    for(int i=0;i<finBlobs[1].size();i++){
                        if(count<999 && calMat.cols>2){
                            /// POSSIBLE SPEEDUP : USE POINTERS
                            v=pCs[1].at<Vec3f>(finBlobs[1][i].y,finBlobs[1][i].x);
                            ///
                            (*rP).X=v[0]*matC[0][0]+v[1]*matC[0][1]+v[2]*matC[0][2] +matC[0][3];
                            (*rP).Y=v[0]*matC[1][0]+v[1]*matC[1][1]+v[2]*matC[1][2] +matC[1][3];
                            (*rP).Z=v[0]*matC[2][0]+v[1]*matC[2][1]+v[2]*matC[2][2] +matC[2][3];
                            count++;
                            rP++;
                        }
                    }
                    //Convert results to projected in first cam
                    if(count >0)
                        cam->depthG[0].ConvertRealWorldToProjective(count,real,proj);

                    rP=proj;

                    for(int i=0;i<count;i++){
                        //circle(final,Point(proj[i].X,proj[i].Y),sqrt((double)blobSizes[1][i]/3.14),cvScalar(0,0,255),-1);
                        if(!hasCloseBlob2(rP,finBlobs[0],mD)){
                            //cout << "1 : "<< (int)(rP->X) << "," << (int)(rP->Y) << endl;
                            ou.push_back(Point2f(rP->X,rP->Y));
                        }
                        rP++;
                    }
                }
                else{
                    ou.insert(ou.end(), finBlobs[1].begin(), finBlobs[1].end() );
                }
            }
            if(sendTuio && isServerCreated){
                if(calSKT->isCalibrated){
                    calSKT->convertPointVector(&ou);
                }
                tds->sendCursors(ou,minDataUpdate);
            }
            if(useDC){
                if(calSKT->isCalibrated){
                    calSKT->convertPointVector(&ou);
                }
                dc->updateData(ou,minDataUpdate);
            }
            numFrames++;
        }
        if(numFrames==10){
            numFrames=0;

#if defined(_WIN32) || defined(_WIN64)
            SYSTEMTIME fin;
            GetSystemTime(&fin);
            double timeSecs=(double)(fin.wSecond+(double)fin.wMilliseconds/1000.0-((double)inicio.wMilliseconds/1000.0+inicio.wSecond))/10;
#else
            struct timeval fin;
            gettimeofday(&fin,NULL);
            double timeSecs=(double)(fin.tv_sec+(double)fin.tv_usec/1000000.0-((double)inicio.tv_usec/1000000.0+inicio.tv_sec))/10;
#endif
            //cout << timeSecs << endl;
            double dif=1.0/timeSecs;
            //cout << "FPS : " << dif <<endl;
            ui->framesPerSec->setText(QString::number(dif));
        }
        delete[] update;
    }
}
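
Both branches above repeat the same moments-based blob extraction; a sketch of that step factored into one helper (assuming the OpenCV 2.x C++ API, names illustrative):

#include <opencv2/opencv.hpp>
#include <vector>

void extractBlobCenters(const cv::Mat &binary, int minBlobSize,
                        std::vector<cv::Point2f> &centers, std::vector<int> &sizes)
{
    std::vector<std::vector<cv::Point> > blobs;
    cv::findContours(binary.clone(), blobs, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    centers.clear();
    sizes.clear();
    for (size_t b = 0; b < blobs.size(); b++) {
        cv::Moments moms = cv::moments(blobs[b], true);
        if (moms.m00 > minBlobSize) {        // keep blobs above the size threshold
            centers.push_back(cv::Point2f((float)(moms.m10 / moms.m00),
                                          (float)(moms.m01 / moms.m00)));
            sizes.push_back((int)moms.m00);  // m00 of a binary blob is its area
        }
    }
}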
Example 6
bool ShapeDiscriptor::discribeImage(cv::Mat &image)
{
    std::vector< std::vector<cv::Point> > contours;
    cv::Mat timage = image.clone();
    printf("rows : %d\n", image.rows);
    findContours(timage, contours, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
    for(int contourIdx = 0; contourIdx < contours.size(); contourIdx++)
    {

        cv::Moments moms = moments(cv::Mat(contours[contourIdx]));
        if(doFilterArea)
        {
            double area = moms.m00;
            if (area < minArea || area > maxArea)
            //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);
            continue;
        }
        
        if(doFilterCircularity)
        {
          double area = moms.m00;
          double perimeter = cv::arcLength(cv::Mat(contours[contourIdx]), true);
          double ratio = 4 * CV_PI * area / (perimeter * perimeter);
          if (ratio < minCircularity)
            //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);
            continue;
        }

        if(doFilterInertia)
        {
          double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
          const double eps = 1e-2;
          double ratio;
          if (denominator > eps)
          {
            double cosmin = (moms.mu20 - moms.mu02) / denominator;
            double sinmin = 2 * moms.mu11 / denominator;
            double cosmax = -cosmin;
            double sinmax = -sinmin;

            double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
            double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
            ratio = imin / imax;
          }
          else
          {
            ratio = 1;
          }
          //p.inErtia = ratio;
          if (ratio < minInertia)
             //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);
             
            continue;
        }

        if(doFilterConvexity)
        {
          std::vector<cv::Point> hull;
          convexHull(cv::Mat(contours[contourIdx]), hull);
          double area = cv::contourArea(cv::Mat(contours[contourIdx]));
          double hullArea = cv::contourArea(cv::Mat(hull));
          double ratio = area / hullArea;
          //p.convexity = ratio;
          if (ratio < minConvexity)
            //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);

            continue;
        }

        timage.release();
        return true;
    }
    timage.release();
    return false;
} 
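
The inertia filter above compares the smallest and largest second moments of the shape: the ratio is 1 for a disc and approaches 0 for a line. The same computation as a standalone helper:

#include <opencv2/opencv.hpp>
#include <cmath>

double inertiaRatio(const cv::Moments &moms)
{
    const double eps = 1e-2;
    double denominator = std::sqrt(std::pow(2 * moms.mu11, 2) +
                                   std::pow(moms.mu20 - moms.mu02, 2));
    if (denominator <= eps)
        return 1.0;  // nearly isotropic shape
    double cosmin = (moms.mu20 - moms.mu02) / denominator;
    double sinmin = 2 * moms.mu11 / denominator;
    double imin = 0.5 * (moms.mu20 + moms.mu02)
                - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
    double imax = 0.5 * (moms.mu20 + moms.mu02)
                + 0.5 * (moms.mu20 - moms.mu02) * cosmin + moms.mu11 * sinmin;
    return imin / imax;  // ratio of smallest to largest second moment
}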
Example 7
// get ROI + edge detection
void FindContour::cellDetection(const Mat &img, vector<Point> &cir_org,
                                Mat &dispImg1, Mat &dispImg2,
                                vector<Point2f> &points1, vector<Point2f> &points2,
                                int &area,
                                int &perimeter,
                                Point2f &ctroid,
                                float &shape,
//                                vector<int> &blebs,
                                int &frameNum){
    frame = &img;
    //rect = boundingRect(Mat(cir));


    Mat frameGray;
    cv::cvtColor(*frame, frameGray, CV_RGB2GRAY);
/*
    QString cellFileName0 = "frame" + QString::number(frameNum) + ".png";
    imwrite(cellFileName0.toStdString(), frameGray);*/

    vector<Point> cir; //***global coordinates of circle***
    for(unsigned int i = 0; i < cir_org.size(); i++){
        cir.push_back(Point(cir_org[i].x / scale, cir_org[i].y / scale));
    }
    //cout << "original circle: " << cir_org << "\n" << " scaled circle: " << cir << endl;

    //enlarge the bounding rect by adding a margin (e) to it
    rect = enlargeRect(boundingRect(Mat(cir)), 5, img.cols, img.rows);

    //global circle mask
    Mat mask_cir_org = Mat::zeros(frame->size(), CV_8UC1);
    fillConvexPoly(mask_cir_org, cir, Scalar(255));

    // flow points
    vector<unsigned int> cell_pts_global;
    vector<Point2f> longOptflow_pt1, longOptflow_pt2;
    Point2f avrg_vec = Point2f(0,0);
    for(unsigned int i = 0; i < points1.size(); i++){
        Point p1 = Point(points1[i].x, points1[i].y);
        Point p2 = Point(points2[i].x, points2[i].y);
        if(mask_cir_org.at<uchar>(p1.y,p1.x) == 255 ){
            cell_pts_global.push_back(i);
            if(dist_square(p1, p2) > 2.0){
                longOptflow_pt1.push_back(Point2f(p1.x, p1.y));
                longOptflow_pt2.push_back(Point2f(p2.x, p2.y));
                avrg_vec.x += (p2.x-p1.x);
                avrg_vec.y += (p2.y-p1.y);
            }
        }
    }

//    if(longOptflow_pt1.size()!= 0){
//        avrg_vec.x = avrg_vec.x / longOptflow_pt1.size();
//        avrg_vec.y = avrg_vec.y / longOptflow_pt1.size();
//    }
    Rect trans_rect = translateRect(rect, avrg_vec);


    // ***
    // if (the homography is a good one) use the homography to update the rectangle
    // otherwise use the same rectangle
    // ***
    if (longOptflow_pt1.size() >= 4){
        Mat H = findHomography(Mat(longOptflow_pt1), Mat(longOptflow_pt2), CV_RANSAC, 2);
        //cout << "H: " << H << endl;

        if(determinant(H) >= 0){
            vector<Point> rect_corners;
            rect_corners.push_back(Point(rect.x, rect.y));
            rect_corners.push_back(Point(rect.x+rect.width, rect.y));
            rect_corners.push_back(Point(rect.x, rect.y+rect.height));
            rect_corners.push_back(Point(rect.x+rect.width, rect.y+rect.height));

            vector<Point> rect_update_corners = pointTransform(rect_corners, H);
            trans_rect = boundingRect(rect_update_corners);
        }
    }


    rectangle(frameGray, trans_rect, Scalar(255), 2);
    imshow("frameGray", frameGray);

    dispImg1 = (*frame)(rect).clone();
    //dispImg2 = Mat(dispImg1.rows, dispImg1.cols, CV_8UC3);

    Mat sub; //*** the rectangle region of ROI (Gray) ***
    cv::cvtColor(dispImg1, sub, CV_RGB2GRAY);
    int width = sub.cols;
    int height = sub.rows;

    vector<Point> circle_ROI; //***local coordinates of circle***
    for (unsigned int i = 0; i < cir.size(); i++){
        Point p = Point(cir[i].x - rect.x, cir[i].y - rect.y);
        circle_ROI.push_back(p);
    }

    Mat adapThreshImg1 = Mat::zeros(height, width, sub.type());
    //image edge detection for the sub region (roi rect)
    adaptiveThreshold(sub, adapThreshImg1, 255.0, ADAPTIVE_THRESH_GAUSSIAN_C,
                          CV_THRESH_BINARY_INV, blockSize, constValue);
    //imshow("adapThreshImg1", adapThreshImg1);

    // dilation and erosion
    Mat dilerod;
    dilErod(adapThreshImg1, dilerod, dilSize);

    //display image 2 -- dilerod of adaptive threshold image
    GaussianBlur(dilerod, dilerod, Size(3, 3), 2, 2 );

    //mask for filtering out the cell of interest
    Mat mask_conv = Mat::zeros(height, width, CV_8UC1);
    fillConvexPoly(mask_conv, circle_ROI, Scalar(255));
    //imshow("mask_before", mask_conv);

    //dilate the mask -> region grows
    Mat mask_conv_dil;
    Mat element = getStructuringElement( MORPH_ELLIPSE, Size( 2*2+2, 2*2+1 ), Point(2,2) );
    dilate(mask_conv, mask_conv_dil, element);
    //imshow("mask_dil", mask_conv_dil);



    /*
    Mat mask_conv_ero;
    erode(mask_conv, mask_conv_ero, element);
    Mat ring_dil, ring_ero;
    bitwise_xor(mask_conv, mask_conv_dil, ring_dil);
    bitwise_xor(mask_conv, mask_conv_ero, ring_ero);
    Mat ring;
    bitwise_or(ring_dil, ring_ero, ring);
    imshow("ring", ring);

    vector<unsigned int> opt_onRing_index;
    // use optflow info set rectangle
    for(unsigned int i = 0; i < points2.size(); i++){
        Point p2 = Point(points2[i].x, points2[i].y);
        Point p1 = Point(points1[i].x, points1[i].y);
        if(ring.at<uchar>(p1.y,p1.x) != 255 &&
                ring.at<uchar>(p2.y,p2.x) != 255)
            continue;
        else{
            opt_onRing_index.push_back(i);
        }
    }*/

    /*
    // draw the optflow on dispImg1
    unsigned int size = opt_inside_cl_index.size();
    for(unsigned int i = 0; i < size; i++ ){
        Point p0( ceil( points1[i].x - rect.x ), ceil( points1[i].y - rect.y ) );
        Point p1( ceil( points2[i].x - rect.x ), ceil( points2[i].y - rect.y) );
        //std::cout << "(" << p0.x << "," << p0.y << ")" << "\n";
        //std::cout << "(" << p1.x << "," << p1.y << ")" << std::endl;

        //draw lines to visualize the flow
        double angle = atan2((double) p0.y - p1.y, (double) p0.x - p1.x);
        double arrowLen = 0.01 * (double) (width);
        line(dispImg1, p0, p1, CV_RGB(255,255,255), 1 );
        Point p;
        p.x = (int) (p1.x + arrowLen * cos(angle + 3.14/4));
        p.y = (int) (p1.y + arrowLen * sin(angle + 3.14/4));
        line(dispImg1, p, p1, CV_RGB(255,255,255), 1 );
        p.x = (int) (p1.x + arrowLen * cos(angle - 3.14/4));
        p.y = (int) (p1.y + arrowLen * sin(angle - 3.14/4));

        line(dispImg1, p, Point(2*p1.x - p0.x, 2*p1.y - p0.y), CV_RGB(255,255,255), 1 );
        //line(dispImg1, p, p1, CV_RGB(255,255,255), 1 );
    }*/

/*
    //stop growing when meeting with canny edges that outside the circle

    Mat canny;
    CannyWithBlur(sub, canny);
    Mat cannyColor;
    cvtColor(canny, cannyColor, CV_GRAY2RGB);
    imwrite("canny.png", canny);
    vector<Point> outsideCircle;
    vector<Point> onRing;
    for(int j = 0; j < height; j++){
        for(int i = 0; i < width; i++){
            if(canny.at<uchar>(j,i) != 0 && mask_conv.at<uchar>(j,i) == 0){
                cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+0] = 81;
                cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+1] = 172;
                cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+2] = 251;
                outsideCircle.push_back(Point(i, j));
                if(ring.at<uchar>(j,i) != 0){
                    cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+0] = 255;
                    cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+1] = 255;
                    cannyColor.data[cannyColor.channels()*(cannyColor.cols*j + i)+2] = 0;
                    onRing.push_back(Point(i,j));
                }
            }
        }
    } */

//    QString cannyFileName = "canny" + QString::number(frameNum) + ".png";
//    imwrite(cannyFileName.toStdString(), cannyColor);



    //bitwise AND on mask and dilerod
    bitwise_and(mask_conv/*_dil*/, dilerod, dispImg2);

    // findcontours
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    unsigned int largest_contour_index;
    dilErodContours(dispImg2, contours, hierarchy, largest_contour_index, perimeter, dispImg1);

    // find the area of the cell by counting the white area inside the largest contour
    Mat cellArea = Mat::zeros(height, width, CV_8UC1);
    drawContours(cellArea, contours, largest_contour_index, Scalar(255), -1, 8, hierarchy, 0, Point() );
    //imshow("cell", cell);
    area = countNonZero(cellArea);

    //cout << "frame " << frameNum << "\n";
    //cout << contours[largest_contour_index] << endl;


    //change dispImg2 from gray to rgb for displaying
    cvtColor(dispImg2, dispImg2, CV_GRAY2RGB);

    //renew circle points as the convex hull
    vector<Point> convHull;
    convexHull(contours[largest_contour_index], convHull);


    // find the centroid of the contour
    Moments mu = moments(contours[largest_contour_index]);
    ctroid = Point2f(mu.m10/mu.m00 + rect.x, mu.m01/mu.m00 + rect.y);

    // find the shape of the cell by the largest contour and centroid
    shape = findShape(ctroid, contours[largest_contour_index]);

    ////---- draw largest contour start ----
    //draw the largest contour
    Mat borderImg = Mat::zeros(height, width, CV_8UC1);
    drawContours(borderImg, contours, largest_contour_index, Scalar(255), 1, 8, hierarchy, 0, Point());
    //QString cellFileName0 = "border" + QString::number(frameNum) + ".png";
    //imwrite(cellFileName0.toStdString(), borderImg);
    ////---- draw largest contour end ----

    // find the number and the sizes of blebs of the cell
    Mat smooth;
    vector<Point> smoothCurve;
    int WIN = 25;
    vector< vector<Point> > tmp;
    smooth = curveSmooth(borderImg, WIN, contours[largest_contour_index], smoothCurve, convHull/*ctroid*/);
    tmp.push_back(smoothCurve);
    drawContours(dispImg1, tmp, 0, Scalar(255, 0, 0));

    bitwise_not(smooth, smooth);
    Mat blebsImg;
    bitwise_and(smooth, cellArea, blebsImg);
    //imshow("blebs", blebs);
    //QString cellFileName2 = "blebs" + QString::number(frameNum) + ".png";
    //imwrite(cellFileName2.toStdString(), blebs);

//    vector<Point> blebCtrs;
//    recursive_connected_components(blebsImg, blebs, blebCtrs);
//    for(unsigned int i = 0; i < blebCtrs.size(); i++){
//        circle(dispImg1, blebCtrs[i], 2, Scalar(255, 255, 0));
//    }


    cir_org.clear();
    for(unsigned int i = 0; i < convHull.size(); i++)
        cir_org.push_back(Point((convHull[i].x + rect.x)*scale, (convHull[i].y + rect.y)*scale));

}
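
The area and centroid measurements above reduce to: find the largest contour of the binary cell image, fill it, count the filled pixels, and take the moment centroid. A compact sketch of that sequence (helper name illustrative):

#include <opencv2/opencv.hpp>
#include <vector>

bool measureLargestRegion(const cv::Mat &binary, int &area, cv::Point2f &centroid)
{
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(binary.clone(), contours, hierarchy,
                     CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    if (contours.empty())
        return false;

    int largest = 0;
    for (int i = 1; i < (int)contours.size(); i++)
        if (cv::contourArea(contours[i]) > cv::contourArea(contours[largest]))
            largest = i;

    cv::Mat filled = cv::Mat::zeros(binary.size(), CV_8UC1);
    cv::drawContours(filled, contours, largest, cv::Scalar(255), -1, 8, hierarchy);
    area = cv::countNonZero(filled);          // white pixels inside the contour

    cv::Moments mu = cv::moments(contours[largest]);
    centroid = cv::Point2f((float)(mu.m10 / mu.m00), (float)(mu.m01 / mu.m00));
    return true;
}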
Example 8
    void DataFromNexus::pushForcePlateData(VDS::Client& client) {

        vector<SimTK::Vec3> pfPos(getForcePlatePosition());

        auto rate = client.GetFrameRate();
        //Force Plates
        // Output the force plate information.
        const unsigned forcePlateSubsamples = client.GetForcePlateSubsamples(0).ForcePlateSubsamples;
        for (unsigned int ssIdx = 0; ssIdx < forcePlateSubsamples; ++ssIdx)    {
            MultipleExternalForcesFrame currentGrfFrame;
            currentGrfFrame.time = 1. / rate.FrameRateHz*(frameNumber_ + 1. / forcePlateSubsamples * ssIdx);
            for (unsigned fpIdx = 0; fpIdx < forcePlateCount_; ++fpIdx) {

                SimTK::Vec3 currentFpPos = pfPos.at(fpIdx);

                ExternalForceData fpData;
                fpData.setSourceName(forcePlateNames_.at(fpIdx));
                VDS::Output_GetGlobalForceVector forceVector = client.GetGlobalForceVector(fpIdx, ssIdx);
                SimTK::Vec3 grfVec(0.);
                grfVec[0] = forceVector.ForceVector[0];
                grfVec[1] = forceVector.ForceVector[1];
                grfVec[2] = forceVector.ForceVector[2];

                VDS::Output_GetGlobalCentreOfPressure centreOfPressure = client.GetGlobalCentreOfPressure(fpIdx, ssIdx);
                SimTK::Vec3 grfPoint;
                grfPoint[0] = centreOfPressure.CentreOfPressure[0]; // *fromNexusToModelLengthConversion_;
                grfPoint[1] = centreOfPressure.CentreOfPressure[1]; // *fromNexusToModelLengthConversion_;
                grfPoint[2] = centreOfPressure.CentreOfPressure[2]; // *fromNexusToModelLengthConversion_;

                //calculate the moment produced about the global coordinate origin by the force acting at the plate position (r x F)
                SimTK::Vec3 momentOnPosition(0.);
                momentOnPosition[0] = currentFpPos[1] * grfVec[2] - currentFpPos[2] * grfVec[1];
                momentOnPosition[1] = currentFpPos[2] * grfVec[0] - currentFpPos[0] * grfVec[2];
                momentOnPosition[2] = currentFpPos[0] * grfVec[1] - currentFpPos[1] * grfVec[0];

                VDS::Output_GetGlobalMomentVector momentVector = client.GetGlobalMomentVector(fpIdx, ssIdx);

                //calculate the correct moments relative to the global coordinate system by adding the missing position moment
                SimTK::Vec3 moments(0.);
                moments[0] = momentVector.MomentVector[0] + momentOnPosition[0];
                moments[1] = momentVector.MomentVector[1] + momentOnPosition[1];
                moments[2] = momentVector.MomentVector[2] + momentOnPosition[2];

                SimTK::Vec3 grfTorque;
                grfTorque[0] = 0;
                grfTorque[1] = (moments[1] - grfVec[0] * grfPoint[2] + grfVec[2] * grfPoint[0]);
                grfTorque[2] = 0;

                fpData.setForceVector(-grfVec);
                fpData.setMoments(-moments);
                fpData.setUseApplicationPoint(true);
                fpData.setTorque(-grfTorque);
                if (fabs(grfVec[1]) < 10) {
                    grfPoint[0] = 0; grfPoint[2] = 0;
                }

                fpData.setApplicationPoint(grfPoint);
                currentGrfFrame.data.push_back(fpData);
            }
            outputGrfQueue_->push(currentGrfFrame);
        }
    }
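
The moment transfer above is the standard change of reference point: the moment about the global origin equals the measured moment plus the cross product of the plate position with the force. A minimal sketch with SimTK::Vec3 (cross() assumed available from SimTKcommon; helper name illustrative):

#include <SimTKcommon.h>

SimTK::Vec3 momentAboutGlobalOrigin(const SimTK::Vec3 &platePos,
                                    const SimTK::Vec3 &force,
                                    const SimTK::Vec3 &measuredMoment)
{
    SimTK::Vec3 positionMoment = SimTK::cross(platePos, force); // r x F
    return measuredMoment + positionMoment;
}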
Example 9
void SupportVectorMachineDemo(Mat& class1_samples, char* class1_name, Mat& class2_samples, char* class2_name, Mat& unknown_samples)
{
    float labels[MAX_SAMPLES];
    float training_data[MAX_SAMPLES][2];
	CvSVM SVM;

    // Image for visual representation of (2-D) feature space
    int width = MAX_FEATURE_VALUE+1, height = MAX_FEATURE_VALUE+1;
    Mat feature_space = Mat::zeros(height, width, CV_8UC3);

	int number_of_samples = 0;
	// Loops three times:
	//  1st time - extracts feature values for class 1
	//  2nd time - extracts feature values for class 2 AND trains SVM
	//  3rd time - extracts feature values for unknowns AND predicts their classes using SVM
	for (int current_class = 1; current_class<=UNKNOWN_CLASS; current_class++)
	{
		Mat gray_image,binary_image;
		if (current_class == 1)
			cvtColor(class1_samples, gray_image, CV_BGR2GRAY);
		else if (current_class == 2)
			cvtColor(class2_samples, gray_image, CV_BGR2GRAY);
		else cvtColor(unknown_samples, gray_image, CV_BGR2GRAY);        
		threshold(gray_image,binary_image,128,255,THRESH_BINARY_INV);

	    vector<vector<Point>> contours;
		vector<Vec4i> hierarchy;
		findContours(binary_image,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
		Mat contours_image = Mat::zeros(binary_image.size(), CV_8UC3);
		contours_image = Scalar(255,255,255);
		// Do some processing on all contours (objects and holes!)
		vector<vector<Point>> hulls(contours.size());
		vector<vector<int>> hull_indices(contours.size());
		vector<vector<Vec4i>> convexity_defects(contours.size());
		vector<Moments> contour_moments(contours.size());
		for (int contour_number=0; (contour_number>=0) && (contour_number<(int)contours.size()); contour_number=hierarchy[contour_number][0])
		{
			if (contours[contour_number].size() > 10)
			{
				convexHull(contours[contour_number], hulls[contour_number]);
				convexHull(contours[contour_number], hull_indices[contour_number]);
				convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
				contour_moments[contour_number] = moments( contours[contour_number] );
				// Draw the shape and features
				Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
				drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
				char output[500];
				double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
				// Draw the convex hull
				drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
				// Highlight any convexities
				int largest_convexity_depth=0;
				for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
				{
					if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
						largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
					if (convexity_defects[contour_number][convexity_index][3] > 256*2)
					{
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
						line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
					}
				}
				// Compute moments and a measure of the deepest convexity
				double hu_moments[7];
				HuMoments( contour_moments[contour_number], hu_moments );
				double diameter = ((double) contours[contour_number].size())/PI;
				double convexity_depth = ((double) largest_convexity_depth)/256.0;
				double convex_measure = convexity_depth/diameter;
				int class_id = current_class;
				float feature[2] = { (float) convex_measure*((float) MAX_FEATURE_VALUE), (float) hu_moments[0]*((float) MAX_FEATURE_VALUE) };
				if (feature[0] > ((float) MAX_FEATURE_VALUE)) feature[0] = ((float) MAX_FEATURE_VALUE);
				if (feature[1] > ((float) MAX_FEATURE_VALUE)) feature[1] = ((float) MAX_FEATURE_VALUE);
				if (current_class == UNKNOWN_CLASS)
				{
					// Try to predict the class
					Mat sampleMat = (Mat_<float>(1,2) << feature[0], feature[1]);
					float prediction = SVM.predict(sampleMat);
					class_id = (prediction == 1.0) ? 1 : (prediction == -1.0) ? 2 : 0;
				}
				const char* current_class_name = (class_id==1) ? class1_name : (class_id==2) ? class2_name : "Unknown";

				sprintf(output,"Class=%s, Features %.2f, %.2f", current_class_name, feature[0]/((float) MAX_FEATURE_VALUE), feature[1]/((float) MAX_FEATURE_VALUE));
				Point location( contours[contour_number][0].x-40, contours[contour_number][0].y-3 );
				putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
				if (current_class == UNKNOWN_CLASS)
				{
				}
				else if (number_of_samples < MAX_SAMPLES)
				{
					labels[number_of_samples] = (float) ((current_class == 1) ? 1.0 : -1.0);
					training_data[number_of_samples][0] = feature[0];
					training_data[number_of_samples][1] = feature[1];
					number_of_samples++;
				}
			}
		}
		if (current_class == 1)
		{
			Mat temp_output = contours_image.clone();
			imshow(class1_name, temp_output );
		}
		else if (current_class == 2)
		{
			Mat temp_output2 = contours_image.clone();
			imshow(class2_name, temp_output2 );

			// Now that features for both classes have been determined, train the SVM
			Mat labelsMat(number_of_samples, 1, CV_32FC1, labels);
			Mat trainingDataMat(number_of_samples, 2, CV_32FC1, training_data);
			// Set up SVM's parameters
			CvSVMParams params;
			params.svm_type    = CvSVM::C_SVC;
			params.kernel_type = CvSVM::POLY;
			params.degree = 1;
			params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
			// Train the SVM
			SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

			// Show the SVM classifier for all possible feature values
			Vec3b green(192,255,192), blue (255,192,192);
			// Show the decision regions given by the SVM
			for (int i = 0; i < feature_space.rows; ++i)
				for (int j = 0; j < feature_space.cols; ++j)
				{
					Mat sampleMat = (Mat_<float>(1,2) << j,i);
					float prediction = SVM.predict(sampleMat);
					if (prediction == 1)
						feature_space.at<Vec3b>(i,j) = green;
					else if (prediction == -1)
					    feature_space.at<Vec3b>(i,j)  = blue;
				}
			// Show the training data (as dark circles)
			for(int sample=0; sample < number_of_samples; sample++)
				if (labels[sample] == 1.0)
					circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 0, 128, 0 ), -1, 8);
				else circle( feature_space, Point((int) training_data[sample][0], (int) training_data[sample][1]), 3, Scalar( 128, 0, 0 ), -1, 8);
			// Highlight the support vectors (in red)
			int num_support_vectors = SVM.get_support_vector_count();
			for (int support_vector_index = 0; support_vector_index < num_support_vectors; ++support_vector_index)
			{
				const float* v = SVM.get_support_vector(support_vector_index);
				circle( feature_space,  Point( (int) v[0], (int) v[1]),   3,  Scalar(0, 0, 255));
			}
			imshow("SVM feature space", feature_space);
		}
		else if (current_class == 3)
		{
			imshow("Classification of unknowns", contours_image );
		}
	}
}
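
A minimal sketch of the CvSVM train/predict cycle used above (OpenCV 2.x ml module; the four 2-D samples are illustrative):

#include <opencv2/opencv.hpp>
#include <opencv2/ml/ml.hpp>
using namespace cv;

int main()
{
	float labels[4] = { 1.0f, 1.0f, -1.0f, -1.0f };
	float training_data[4][2] = { {10, 10}, {20, 15}, {200, 210}, {230, 190} };
	Mat labelsMat(4, 1, CV_32FC1, labels);
	Mat trainingDataMat(4, 2, CV_32FC1, training_data);

	CvSVMParams params;
	params.svm_type    = CvSVM::C_SVC;
	params.kernel_type = CvSVM::LINEAR;
	params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

	CvSVM SVM;
	SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

	// Predict the class (+1 or -1) of a new sample
	Mat sampleMat = (Mat_<float>(1, 2) << 15, 12);
	float prediction = SVM.predict(sampleMat);
	return prediction > 0 ? 0 : 1;
}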
Example 10
Mat& CHandGestureRecognitionSystemDlg::FeatureDetect(const Mat& image_input, Mat& image_output)
{
    image_input.copyTo(image_output);
    cvtColor(image_output, image_output, CV_BGR2GRAY);
#ifdef _DEBUG
    TRACE("gray image channels = %d\n", image_output.channels());
#endif
    // morphology
    Mat kernel = Mat(3,3,CV_8UC1, Scalar(255));
    morphologyEx(image_output, image_output, MORPH_OPEN, kernel);
    // floodfill
    int num_floodfill = 0;
    int area_max = 0;
    int value_max = 0;
    for (int i = 0; i < image_output.rows; i++) {
        unsigned char* p_out = image_output.ptr<uchar>(i);
        for (int j = 0; j < image_output.cols; j++) {
        	if (*(p_out + j) == 255) {
                num_floodfill++;
                Scalar new_val = Scalar(num_floodfill);
                Point seed = Point(j, i);
                int area = floodFill(image_output,seed, new_val);
                if (area > area_max) {
                    area_max = area;
                    value_max = num_floodfill;
                }
        	}
        }
    }
    // max area
    int area_left = image_output.cols;
    int area_right = 0;
    int area_top = image_output.rows;
    int area_buttom = 0;
    for (int i = 0; i < image_output.rows; i++) {
        unsigned char* p_out = image_output.ptr<uchar>(i);
        for (int j = 0; j < image_output.cols; j++) {
            if (*(p_out + j) == value_max) {
                *(p_out + j) = 255;
                if (area_left > j) area_left = j;
                if (area_right < j) area_right = j;
                if (area_top > i) area_top = i;
                if (area_buttom < i) area_buttom = i;
            } else {
                *(p_out + j) = 0;
            }
        }
    }
#ifdef _DEBUG
    TRACE("area_left = %d\n", area_left);
    TRACE("area_right = %d\n", area_right);
    TRACE("area_top = %d\n", area_top);
    TRACE("area_buttom = %d\n", area_buttom);
#endif
    // moment: compute the centroid before drawing overlays, so the white
    // rectangle does not bias it
    Moments moment = moments(image_output);
    int center_x = moment.m10 / moment.m00;
    int center_y = moment.m01 / moment.m00;
    point_end = Point(center_x, center_y);
    // rectangle
    rectangle(image_output, Point(area_left, area_top), Point(area_right, area_buttom), Scalar(255), 5); 
    circle(image_output, point_end, 10, Scalar(255), 5);
    GetVector(point_begin, point_end);
    if (vector_length >= 20 || point_begin == Point(image_width / 2, image_height / 2)) {
        point_begin = point_end;
    }
#ifdef _DEBUG
    TRACE("vector_length = %f\n", vector_length);
    TRACE("vector_angle = %f\n", vector_angle);
#endif
    return image_output;
}
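
The hand-center step above is just the centroid of a binary mask from image moments (m10/m00, m01/m00). As a standalone helper with an empty-mask guard (name illustrative):

#include <opencv2/opencv.hpp>

cv::Point maskCentroid(const cv::Mat &binaryMask)
{
    cv::Moments m = cv::moments(binaryMask, true); // treat nonzero pixels as 1
    if (m.m00 == 0)
        return cv::Point(-1, -1);                  // empty mask: no centroid
    return cv::Point((int)(m.m10 / m.m00), (int)(m.m01 / m.m00));
}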
Example 11
	std::vector<std::vector<std::vector<cv::Point>>> MultiContourObjectDetector::processContours(
		std::vector<std::vector<std::vector<cv::Point>>> approxContours,
		double hammingThreshold,
		double correlationThreshold,
		int* numberOfObject)
	{
		vector<vector<vector<Point>>> objects;
		double attenuation = 0;

		for (int i = 0; i < approxContours.size(); i++)
		{
			if (approxContours[i].size() != _baseShape.size())
				continue;
			attenuation = 0;

#ifdef DEBUG_MODE
			Mat tempI(Size(1000, 1000), CV_8UC1);
			tempI = Scalar(0);
			drawContours(tempI, approxContours[i], -1, cv::Scalar(255), 1, CV_AA);
#endif

			double totCorrelation = 0,
				totHamming = 0;

			Moments m = moments(approxContours[i][0], true);
			int cx = int(m.m10 / m.m00);
			int cy = int(m.m01 / m.m00);

			Point c(cx, cy);

			if (!(c.x >= _attenuationRect.x &&
				c.y >= _attenuationRect.y &&
				c.x <= (_attenuationRect.x + _attenuationRect.width) &&
				c.y <= (_attenuationRect.y + _attenuationRect.height)))
				attenuation = 5;

			// C and H with external contour
			vector<Point> externalKeypoints;
			Utility::findCentroidsKeypoints(approxContours[i][0], externalKeypoints, Utility::CentroidDetectionMode::THREE_LOOP);
			totCorrelation += (Utility::correlationWithBase(externalKeypoints, _baseKeypoints[0]) - attenuation);

			totHamming += (Utility::calculateContourPercentageCompatibility(approxContours[i][0], _baseShape[0]) - attenuation);

			// look for the contour with the best centroid and shape match

			for (int j = 1; j < approxContours[i].size(); j++)
			{
				attenuation = 0;

				Moments m = moments(approxContours[i][j], true);
				int cx = int(m.m10 / m.m00);
				int cy = int(m.m01 / m.m00);

				Point c(cx, cy);

				if (!(c.x >= _attenuationRect.x &&
					c.y >= _attenuationRect.y &&
					c.x <= (_attenuationRect.x + _attenuationRect.width) &&
					c.y <= (_attenuationRect.y + _attenuationRect.height)))
					attenuation = 5;


				double maxCorrelation = std::numeric_limits<double>::lowest(),
					maxHamming = std::numeric_limits<double>::lowest();

				for (int k = 1; k < _baseShape.size(); k++)
				{
					vector<Point> internalKeypoints;
					Utility::findCentroidsKeypoints(approxContours[i][j], internalKeypoints, Utility::CentroidDetectionMode::THREE_LOOP);
					maxCorrelation = max(maxCorrelation, Utility::correlationWithBase(internalKeypoints, _baseKeypoints[k]));

					maxHamming = max(maxHamming, Utility::calculateContourPercentageCompatibility(approxContours[i][j], _baseShape[k]));
				}

				totCorrelation += (maxCorrelation - attenuation);
				totHamming += (maxHamming - attenuation);
			}

			totCorrelation /= approxContours[i].size();
			totHamming /= approxContours[i].size();

			cout << "Middle Correlation " << to_string(i) << " with base ---> " << totCorrelation << endl;
			cout << "Middle Hamming distance" << to_string(i) << " with base ---> " << totHamming << endl;

			if (totCorrelation >= correlationThreshold && totHamming >= hammingThreshold)
				objects.push_back(approxContours[i]);
		}

		*numberOfObject = objects.size();

		return objects;
	}
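
The attenuation test above spells out four bound comparisons; cv::Rect::contains expresses the same check. A sketch of that test applied to a contour's centroid (helper name illustrative):

#include <opencv2/opencv.hpp>
#include <vector>

double attenuationFor(const cv::Rect &attenuationRect,
                      const std::vector<cv::Point> &contour)
{
	cv::Moments m = cv::moments(contour, true);
	cv::Point c((int)(m.m10 / m.m00), (int)(m.m01 / m.m00)); // contour centroid
	return attenuationRect.contains(c) ? 0.0 : 5.0; // penalize off-rect contours
}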
Example 12
// In this function, the Voronoi cell is calculated and integrated, and the new goal point is calculated and published.
void SimpleDeployment::publish()
{
    if ( got_map_ && got_me_ ) {
        double factor = map_.info.resolution / resolution_; // zoom factor for openCV visualization

        ROS_DEBUG("SimpleDeployment: Map received, determining Voronoi cells and publishing goal");

        removeOldAgents();

        // Create variables for x-values, y-values and the maximum and minimum of these, needed for VoronoiDiagramGenerator
        std::vector<float> xvalues(agent_catalog_.size());
        std::vector<float> yvalues(agent_catalog_.size());
        double xmin = 0.0, xmax = 0.0, ymin = 0.0, ymax = 0.0;
        cv::Point seedPt = cv::Point(1,1);

        // Create empty image with the size of the map to draw points and voronoi diagram in
        cv::Mat vorImg = cv::Mat::zeros(map_.info.height*factor,map_.info.width*factor,CV_8UC1);

        for ( uint i = 0; i < agent_catalog_.size(); i++ ) {
            geometry_msgs::Pose pose = agent_catalog_[i].getPose();

            // Keep track of x and y values
            xvalues[i] = pose.position.x;
            yvalues[i] = pose.position.y;

            // Keep track of min and max x
            if ( pose.position.x < xmin ) {
                xmin = pose.position.x;
            } else if ( pose.position.x > xmax ) {
                xmax = pose.position.x;
            }

            // Keep track of min and max y
            if ( pose.position.y < ymin ) {
                ymin = pose.position.y;
            } else if ( pose.position.y > ymax ) {
                ymax = pose.position.y;
            }

            // Store point as seed point if it represents this agent
            if ( agent_catalog_[i].getName() == this_agent_.getName() ){
                // Scale positions in metric system column and row numbers in image
                int c = ( pose.position.x - map_.info.origin.position.x ) * factor / map_.info.resolution;
                int r = map_.info.height * factor - ( pose.position.y - map_.info.origin.position.y ) * factor / map_.info.resolution;
                cv::Point pt =  cv::Point(c,r);

                seedPt = pt;
            }
            // Draw point on image
//            cv::circle( vorImg, pt, 3, WHITE, -1, 8);
        }

        ROS_DEBUG("SimpleDeployment: creating a VDG object and generating Voronoi diagram");
        // Construct a VoronoiDiagramGenerator (VoronoiDiagramGenerator.h)
        VoronoiDiagramGenerator VDG;

        xmin = map_.info.origin.position.x; xmax = map_.info.width  * map_.info.resolution + map_.info.origin.position.x;
        ymin = map_.info.origin.position.y; ymax = map_.info.height * map_.info.resolution + map_.info.origin.position.y;

        // Generate the Voronoi diagram using the collected x and y values, the number of points, and the min and max x and y values
        VDG.generateVoronoi(xvalues.data(),yvalues.data(),agent_catalog_.size(),float(xmin),float(xmax),float(ymin),float(ymax));

        float x1,y1,x2,y2;

        ROS_DEBUG("SimpleDeployment: collecting line segments from the VDG object");
        // Collect the generated line segments from the VDG object
        while(VDG.getNext(x1,y1,x2,y2))
        {
            // Scale the line segment end-point coordinates to column and row numbers in image
            int c1 = ( x1 - map_.info.origin.position.x ) * factor / map_.info.resolution;
            int r1 = vorImg.rows - ( y1 - map_.info.origin.position.y ) * factor / map_.info.resolution;
            int c2 = ( x2 - map_.info.origin.position.x ) * factor / map_.info.resolution;
            int r2 = vorImg.rows - ( y2 - map_.info.origin.position.y ) * factor / map_.info.resolution;

            // Draw line segment
            cv::Point pt1 = cv::Point(c1,r1),
                      pt2 = cv::Point(c2,r2);
            cv::line(vorImg,pt1,pt2,WHITE);
        }

        ROS_DEBUG("SimpleDeployment: drawing map occupancygrid and resizing to voronoi image size");
        // Create cv image from map data and resize it to the same size as voronoi image
        cv::Mat mapImg = drawMap();
        cv::Mat viewImg(vorImg.size(),CV_8UC1);
        cv::resize(mapImg, viewImg, vorImg.size(), 0.0, 0.0, cv::INTER_NEAREST );

        // Add images together to make the total image
        cv::Mat totalImg(vorImg.size(),CV_8UC1);
        cv::bitwise_or(viewImg,vorImg,totalImg);

        cv::Mat celImg = cv::Mat::zeros(vorImg.rows+2, vorImg.cols+2, CV_8UC1);
        cv::Scalar newVal = cv::Scalar(1), upDiff = cv::Scalar(100), loDiff = cv::Scalar(256);
        cv::Rect rect;

        cv::floodFill(totalImg,celImg,seedPt,newVal,&rect,loDiff,upDiff,4 + (255 << 8) + cv::FLOODFILL_MASK_ONLY);

        // Compute the center of mass of the Voronoi cell
        cv::Moments m = moments(celImg, false);
        cv::Point centroid(m.m10/m.m00, m.m01/m.m00);

        cv::circle( celImg, centroid, 3, BLACK, -1, 8);

        // Convert the centroid from celImg to totalImg coordinates (totalImg(x,y) = celImg(x+1,y+1))
        cv::Point onePt(1,1);
        centroid = centroid - onePt;

        for ( uint i = 0; i < agent_catalog_.size(); i++ ){

            int c = ( xvalues[i] - map_.info.origin.position.x ) * factor / map_.info.resolution;
            int r = map_.info.height * factor - ( yvalues[i] - map_.info.origin.position.y ) * factor / map_.info.resolution;
            cv::Point pt =  cv::Point(c,r);
            cv::circle( totalImg, pt, 3, GRAY, -1, 8);
        }

        cv::circle( totalImg, seedPt, 3, WHITE, -1, 8);
        cv::circle( totalImg, centroid, 2, WHITE, -1, 8);			//where centroid is the goal position

        // Due to bandwidth issues, only display this image if requested
        if (show_cells_) {
            cv::imshow( "Voronoi Cells", totalImg );
        }
        cv::waitKey(3);

        // Scale goal position in map back to true goal position
        geometry_msgs::Pose goalPose;
//        goalPose.position.x = centroid.x * map_.info.resolution / factor + map_.info.origin.position.x;
        goalPose.position.x = centroid.x / factor + map_.info.origin.position.x;
//        goalPose.position.y = (map_.info.height - centroid.y / factor) * map_.info.resolution + map_.info.origin.position.y;
        goalPose.position.y = (map_.info.height - centroid.y / factor) + map_.info.origin.position.y;
        double phi = atan2( seedPt.y - centroid.y, centroid.x - seedPt.x );
        goalPose.orientation = tf::createQuaternionMsgFromYaw(phi);

        goal_.pose = goalPose;
        goal_.header.frame_id = "map";
        goal_.header.stamp = ros::Time::now();

        goal_pub_.publish(goal_);

//        move_base_msgs::MoveBaseGoal goal;

//        goal.target_pose.header.frame_id = "map";
//        goal.target_pose.header.stamp = ros::Time::now();

//        goal.target_pose.pose = goalPose;

//        ac_.sendGoal(goal);
    }
    else {
        ROS_DEBUG("SimpleDeployment: No map received");
    }

}
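// The goal computation above inverts the world-to-pixel mapping used when the
// Voronoi segments were drawn (y axis flipped, `factor` pixels per map cell,
// `resolution` metres per cell). A minimal sketch of the two transforms under
// those conventions, assuming the ROS nav_msgs types used above (helper names
// are illustrative, not part of the original code). Note that the uncommented
// goal lines omit `resolution`, which matches this inverse only when the map
// resolution is 1.0:

static cv::Point worldToPixel(double x, double y, const nav_msgs::MapMetaData &info,
                              int factor, int rows)
{
    int c = (x - info.origin.position.x) * factor / info.resolution;
    int r = rows - (y - info.origin.position.y) * factor / info.resolution;
    return cv::Point(c, r);
}

static void pixelToWorld(const cv::Point &p, const nav_msgs::MapMetaData &info,
                         int factor, int rows, double &x, double &y)
{
    x = p.x * info.resolution / factor + info.origin.position.x;
    y = (rows - p.y) * info.resolution / factor + info.origin.position.y;
}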
Esempio n. 13
0
Template::Template(QString path){
    width = -2;
    height = -2;
    for (int i = 0; i < TEMPLATES_COUNT; i++){
        QString file = path + QString::number(i) + ".png";
        cv::Mat im = cv::imread(file.toStdString());
        cvtColor(im, images[i], CV_BGR2GRAY); // cv::imread loads BGR, so convert from BGR (not RGB)

        if (width == -2){
            width = images[i].cols;
        }
        else if (images[i].cols != width){
            qDebug() << "Error : all templates must be the same size";
        }

        if (height == -2){
            height = images[i].rows;
        }
        else if (images[i].rows != height){
            qDebug() << "Error : all templates must be the same size";
        }

        std::vector<std::vector<cv::Point> > templateContours;
        std::vector<cv::Vec4i> hierarchy;

        findContours(images[i].clone(), templateContours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

        if (templateContours.size() == 0){
            qDebug() << "Error, template doesn't have a contour";
        }
        else {
            cv::Moments imageMoments = moments(templateContours[0], false);
            massCenters[i] = cv::Point2f(imageMoments.m10/imageMoments.m00 , imageMoments.m01/imageMoments.m00);
        }

        cv::Mat firstHalf = cv::Mat(images[i].clone(), cv::Rect(0, 0, images[i].cols/2, images[i].rows));
        cv::Mat secondHalf = cv::Mat(images[i].clone(), cv::Rect(images[i].cols/2, 0, images[i].cols/2, images[i].rows));

        halfMassCenters[i][0] = getMassCenterFromImage(firstHalf);
        halfMassCenters[i][1] = getMassCenterFromImage(secondHalf);

        cv::Mat firstHalfHori = cv::Mat(images[i].clone(), cv::Rect(0, 0, images[i].cols, images[i].rows/2));
        cv::Mat secondHalfHori = cv::Mat(images[i].clone(), cv::Rect(0, images[i].rows/2, images[i].cols, images[i].rows/2));

        halfMassCentersHori[i][0] = getMassCenterFromImage(firstHalfHori);
        halfMassCentersHori[i][1] = getMassCenterFromImage(secondHalfHori);


        Histogram* hori = new Histogram(width);
        Histogram* verti = new Histogram(height);

        for (int x = 0; x < images[i].cols; x++){
            for (int y = 0; y < images[i].rows; y++){
                hori->add(x, images[i].at<uchar>(y, x)/255.0);
                verti->add(y, images[i].at<uchar>(y, x)/255.0);
            }
        }

        histoHori[i] = hori;
        histoVerti[i] = verti;
    }
}
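// getMassCenterFromImage() is used above but not shown in this listing. A
// minimal sketch of a plausible implementation (an assumption, not the
// original code): treat the grayscale half-image as a density and return the
// centroid of its intensity-weighted image moments.

cv::Point2f getMassCenterFromImage(const cv::Mat &img)
{
    cv::Moments m = cv::moments(img, false); // false: intensity-weighted, not binary
    if (m.m00 == 0)
        return cv::Point2f(0, 0); // empty image: no mass center
    return cv::Point2f(m.m10 / m.m00, m.m01 / m.m00);
}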
    void TestElementAreaPerimeterCentroidAndMoments()
    {
        // Test methods with a regular cylindrical honeycomb mesh
        CylindricalHoneycombVertexMeshGenerator generator(4, 4);
        Cylindrical2dVertexMesh* p_mesh = generator.GetCylindricalMesh();

        TS_ASSERT_EQUALS(p_mesh->GetNumNodes(), 40u);

        // Test area and perimeter calculations for all elements
        for (unsigned i=0; i<p_mesh->GetNumElements(); i++)
        {
            TS_ASSERT_DELTA(p_mesh->GetVolumeOfElement(i), 0.8660, 1e-4);
            TS_ASSERT_DELTA(p_mesh->GetSurfaceAreaOfElement(i), 3.4641, 1e-4);
        }

        // Test centroid calculations for non-periodic element
        c_vector<double, 2> centroid = p_mesh->GetCentroidOfElement(5);
        TS_ASSERT_DELTA(centroid(0), 2.0, 1e-4);
        TS_ASSERT_DELTA(centroid(1), 5.0*0.5/sqrt(3.0), 1e-4);

        // Test centroid calculations for periodic element
        centroid = p_mesh->GetCentroidOfElement(7);
        TS_ASSERT_DELTA(centroid(0), 0.0, 1e-4);
        TS_ASSERT_DELTA(centroid(1), 5.0*0.5/sqrt(3.0), 1e-4);

        // Test CalculateMomentOfElement() for all elements
        // all elements are regular hexagons with edge 1/sqrt(3.0)
        c_vector<double, 3> moments;
        for (unsigned i=0; i<p_mesh->GetNumElements(); i++)
        {
            moments = p_mesh->CalculateMomentsOfElement(i);

            TS_ASSERT_DELTA(moments(0), 5*sqrt(3.0)/16/9, 1e-6); // Ixx
            TS_ASSERT_DELTA(moments(1), 5*sqrt(3.0)/16/9, 1e-6); // Iyy
            TS_ASSERT_DELTA(moments(2), 0.0, 1e-6);            // Ixy = 0 by symmetry
        }

        // Test methods with a cylindrical mesh comprising a single rectangular element
        std::vector<Node<2>*> rectangle_nodes;
        rectangle_nodes.push_back(new Node<2>(0, false, 8.0, 2.0));
        rectangle_nodes.push_back(new Node<2>(1, false, 8.0, 0.0));
        rectangle_nodes.push_back(new Node<2>(2, false, 2.0, 0.0));
        rectangle_nodes.push_back(new Node<2>(3, false, 2.0, 2.0));
        std::vector<VertexElement<2,2>*> rectangle_elements;
        rectangle_elements.push_back(new VertexElement<2,2>(0, rectangle_nodes));

        Cylindrical2dVertexMesh rectangle_mesh(10.0, rectangle_nodes, rectangle_elements);

        TS_ASSERT_DELTA(rectangle_mesh.GetVolumeOfElement(0), 8.0, 1e-10);
        TS_ASSERT_DELTA(rectangle_mesh.GetSurfaceAreaOfElement(0), 12.0, 1e-4);

        ///\todo #2393 - for consistency, the centroid should be at (0, 2.5)
        c_vector<double, 2> rectangle_centroid = rectangle_mesh.GetCentroidOfElement(0);
        TS_ASSERT_DELTA(rectangle_centroid(0), 10.0, 1e-4);
        TS_ASSERT_DELTA(rectangle_centroid(1), 1.0, 1e-4);

        c_vector<double, 3> rectangle_moments = rectangle_mesh.CalculateMomentsOfElement(0);
        TS_ASSERT_DELTA(rectangle_moments(0), 8.0/3.0, 1e-6);  // Ixx
        TS_ASSERT_DELTA(rectangle_moments(1), 32.0/3.0, 1e-6); // Iyy
        TS_ASSERT_DELTA(rectangle_moments(2), 0.0, 1e-6);      // Ixy = 0 by symmetry

        c_vector<double, 2> rectangle_short_axis = rectangle_mesh.GetShortAxisOfElement(0);
        TS_ASSERT_DELTA(rectangle_short_axis(0), 0.0, 1e-4);
        TS_ASSERT_DELTA(rectangle_short_axis(1), 1.0, 1e-4);
    }
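// The expected values in the moment assertions follow from the standard
// second moments of area. For a regular hexagon of edge a about its centroid,
//
//   I_{xx} = I_{yy} = \frac{5\sqrt{3}}{16} a^4,
//
// so a = 1/\sqrt{3} gives a^4 = 1/9 and I_{xx} = I_{yy} = 5\sqrt{3}/144,
// i.e. the 5*sqrt(3.0)/16/9 tested above. For the rectangular element
// (width b = 4 after unwrapping the periodic boundary, height h = 2),
//
//   I_{xx} = \frac{b h^3}{12} = \frac{8}{3}, \qquad
//   I_{yy} = \frac{h b^3}{12} = \frac{32}{3}.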
Esempio n. 15
0
string OCV::trackAndEval(Mat &threshold, Mat &canvas){
  //~ Mat temp;
  //~ threshold.copyTo(temp);
  //these two vectors needed for output of findContours
  vector< vector<Point> > contours;
  vector<Vec4i> hierarchy;
  //find contours of filtered image using openCV findContours function
  findContours(threshold, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
  //use moments method to find our filtered object
  string retValue = "";
  double area;
  int numObjects = hierarchy.size();
  if (debounceCounter) debounceCounter--;
  if (numObjects > 0) {
    //only track when exactly one object is found; more objects means a noisy filter
    if (numObjects==1){
      Moments moment = moments((Mat)contours[0]);
      area = moment.m00;
      Point lastPoint(
        moment.m10/area, // x
        moment.m01/area  // y
      );
      drawObject(area, lastPoint, canvas);
      // Evaluate in which position of the grid the point is
      // state machine
      // TODO CHECk bounding rectangles and contour to evaluate it. Use the layout form PNG image!
      // expression limits
      switch (trackState) {
        case TrackStt::NO_TRACK:
          cout << "TrackStt::NO_TRACK" << endl;
        case TrackStt::UNARMED:
          if (lastPoint.x > EXP_HORI_L) {
            trackState = TrackStt::EXPRESSION;
            //~ cout << "Next state TrackStt::EXPRESSION" << endl; 
          }
          else if (lastPoint.y > BUTT_VER_T && lastPoint.y < BUTT_VER_B) {
            trackState = TrackStt::ARMED;
            cout << "Next state TrackStt::ARMED" << endl;
          }
          else {
            trackState = TrackStt::UNARMED;
          }
          break;
        case TrackStt::ARMED:
          if (lastPoint.x > EXP_HORI_L && lastPoint.x < EXP_HORI_R) {
            trackState = TrackStt::EXPRESSION;
          }
          else if (lastPoint.y > BUTT_VER_B) {
            trackState = TrackStt::DEBOUNCING;
            debounceCounter = debouceFrames;
            if (lastPoint.x > B1_HORI_L && lastPoint.x < B1_HORI_R) {
              cout << "1" << endl; 
              retValue = "1";
            }
            else if (lastPoint.x > B2_HORI_L && lastPoint.x < B2_HORI_R) {
              cout << "2" << endl; 
              retValue = "2";
            }
            else if (lastPoint.x > B3_HORI_L && lastPoint.x < B3_HORI_R) {
              cout << "3" << endl; 
              retValue = "3";
            }
            else if (lastPoint.x > B4_HORI_L && lastPoint.x < B4_HORI_R) {
              cout << "4" << endl; 
              retValue = "4";
            }
          }
          else if (lastPoint.y < BUTT_VER_T) {
            trackState = TrackStt::DEBOUNCING;
            debounceCounter = debouceFrames;
            if (lastPoint.x > B5_HORI_L && lastPoint.x < B5_HORI_R) {
              cout << "5" << endl; 
              retValue = "5";
            }
            else if (lastPoint.x > B6_HORI_L && lastPoint.x < B6_HORI_R) {
              cout << "6" << endl; 
              retValue = "6";
            }
          }
          break;
        case TrackStt::DEBOUNCING: 
          //~ cout << "DEBOUNCING" << endl; 
          if (debounceCounter==0) 
            trackState = TrackStt::UNARMED;
          break;
        case TrackStt::EXPRESSION: 
          if (lastPoint.x < EXP_HORI_L) {
              trackState = TrackStt::UNARMED;
          }
          else{ 
            // TODO compare with the previous level
            int expLevel;
            if (lastPoint.y > EXP_VER_B) 
              expLevel = 0;
            else if (lastPoint.y < EXP_VER_T)
              expLevel = expressionDiv-1;
            else {
              float ylevel = (float)(lastPoint.y-EXP_VER_T)/(float)(EXP_VER_RANGE);
              expLevel = (int)((float)(expressionDiv-1)*(1.0 - ylevel));
            }
            if (expLevel!=lastExLevel) {
              cout << "Expression level:" << expLevel << endl; 
              retValue = "X"+to_string(expLevel);
              lastExLevel = expLevel;
            }
          }
          break;
        default: 
          break;
      } 
      return retValue;
    }
    else {
      if (trackState!=TrackStt::DEBOUNCING) trackState = TrackStt::NO_TRACK;
      //void putText(Mat& img, const string& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=8, bool bottomLeftOrigin=false )
      putText(canvas, "More than one object detected!", Point(2, FRAME_HEIGHT-10), 1, 0.7, Scalar(0,0,255), 1);
      cout << "More than one object detected! Next state is TrackStt::NO_TRACK" << endl; 
    }
  }
  if (trackState!=TrackStt::DEBOUNCING) trackState = TrackStt::NO_TRACK;
  return retValue;
}
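// The DEBOUNCING state above is driven by a per-frame countdown:
// debounceCounter is loaded with debouceFrames when a button fires and is
// decremented once per call at the top of trackAndEval(). A minimal sketch of
// the same hold-off pattern in isolation (names assumed from the code above):

struct Debouncer {
    int frames;      // hold-off length, cf. debouceFrames
    int counter = 0; // remaining frames, cf. debounceCounter
    explicit Debouncer(int f) : frames(f) {}
    void trigger() { counter = frames; }           // an event fired
    void tick() { if (counter) counter--; }        // call once per frame
    bool released() const { return counter == 0; } // hold-off elapsed
};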
Esempio n. 16
0
void RecognitionDemos( Mat& full_image, Mat& template1, Mat& template2, Mat& template1locations, Mat& template2locations, VideoCapture& bicycle_video, Mat& bicycle_background, Mat& bicycle_model, VideoCapture& people_video, CascadeClassifier& cascade, Mat& numbers, Mat& good_orings, Mat& bad_orings, Mat& unknown_orings )
{
	Timestamper* timer = new Timestamper();

	// Principal Components Analysis
	PCASimpleExample();
    char ch = cvWaitKey();
	cvDestroyAllWindows();

	PCAFaceRecognition();
    ch = cvWaitKey();
	cvDestroyAllWindows();

	// Statistical Pattern Recognition
	Mat gray_numbers,binary_numbers;
	cvtColor(numbers, gray_numbers, CV_BGR2GRAY);
	threshold(gray_numbers,binary_numbers,128,255,THRESH_BINARY_INV);
    vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary_numbers,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
	Mat contours_image = Mat::zeros(binary_numbers.size(), CV_8UC3);
	contours_image = Scalar(255,255,255);
	// Do some processing on all contours (objects and holes!)
	vector<RotatedRect> min_bounding_rectangle(contours.size());
	vector<vector<Point>> hulls(contours.size());
	vector<vector<int>> hull_indices(contours.size());
	vector<vector<Vec4i>> convexity_defects(contours.size());
	vector<Moments> contour_moments(contours.size());
	for (int contour_number=0; (contour_number<(int)contours.size()); contour_number++)
	{
		if (contours[contour_number].size() > 10)
		{
			min_bounding_rectangle[contour_number] = minAreaRect(contours[contour_number]);
			convexHull(contours[contour_number], hulls[contour_number]);
			convexHull(contours[contour_number], hull_indices[contour_number]);
			convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
			contour_moments[contour_number] = moments( contours[contour_number] );
		}
	}
	for (int contour_number=0; (contour_number>=0); contour_number=hierarchy[contour_number][0])
	{
		if (contours[contour_number].size() > 10)
		{
        Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
        drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
		char output[500];
		double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
		// Process any holes (removing their area from the area of the enclosing contour)
		for (int hole_number=hierarchy[contour_number][2]; (hole_number>=0); hole_number=hierarchy[hole_number][0])
		{
			area -= (contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
 			drawContours( contours_image, contours, hole_number, colour, CV_FILLED, 8, hierarchy );
			sprintf(output,"Area=%.0f", contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Point location( contours[hole_number][0].x +20, contours[hole_number][0].y +5 );
			putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
		// Draw the minimum bounding rectangle
		Point2f bounding_rect_points[4];
		min_bounding_rectangle[contour_number].points(bounding_rect_points);
		line( contours_image, bounding_rect_points[0], bounding_rect_points[1], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[1], bounding_rect_points[2], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[2], bounding_rect_points[3], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[3], bounding_rect_points[0], Scalar(0, 0, 127));
		float bounding_rectangle_area = min_bounding_rectangle[contour_number].size.area();
		// Draw the convex hull
        drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
		// Highlight any convexity defects
		int largest_convexity_depth=0;
		for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
		{
			if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
				largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
			if (convexity_defects[contour_number][convexity_index][3] > 256*2)
			{
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
			}
		}
		double hu_moments[7];
		HuMoments( contour_moments[contour_number], hu_moments );
		sprintf(output,"Perimeter=%d, Area=%.0f, BArea=%.0f, CArea=%.0f", contours[contour_number].size(),area,min_bounding_rectangle[contour_number].size.area(),contourArea(hulls[contour_number]));
		Point location( contours[contour_number][0].x, contours[contour_number][0].y-3 );
		putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		sprintf(output,"HuMoments = %.2f, %.2f, %.2f", hu_moments[0],hu_moments[1],hu_moments[2]);
		Point location2( contours[contour_number][0].x+100, contours[contour_number][0].y-3+15 );
		putText( contours_image, output, location2, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
	}
	imshow("Shape Statistics", contours_image );
	char c = cvWaitKey();
	cvDestroyAllWindows();

	// Support Vector Machine
	imshow("Good - original",good_orings);
	imshow("Defective - original",bad_orings);
	imshow("Unknown - original",unknown_orings);
	SupportVectorMachineDemo(good_orings,"Good",bad_orings,"Defective",unknown_orings);
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Template Matching
	Mat display_image, correlation_image;
	full_image.copyTo( display_image );
	double min_correlation, max_correlation;
	Mat matched_template_map;
	int result_columns =  full_image.cols - template1.cols + 1;
	int result_rows = full_image.rows - template1.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 ); // Mat::create expects (rows, cols)
	timer->reset();
	double before_tick_count = static_cast<double>(getTickCount());
	matchTemplate( full_image, template1, correlation_image, CV_TM_CCORR_NORMED );
	double after_tick_count = static_cast<double>(getTickCount());
	double duration_in_ms = 1000.0*(after_tick_count-before_tick_count)/getTickFrequency();
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (1)");
	Mat matched_template_display1;
	cvtColor(matched_template_map, matched_template_display1, CV_GRAY2BGR);
	Mat correlation_window1 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template1, Scalar(0,0,255) );
	double precision, recall, accuracy, specificity, f1;
	Mat template1locations_gray;
	cvtColor(template1locations, template1locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template1locations_gray, precision, recall, accuracy, specificity, f1 );
	char results[400];
	Scalar colour( 255, 255, 255);
	sprintf( results, "precision=%.2f", precision);
	Point location( 7, 213 );
	putText( display_image, "Results (1)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
  
	result_columns =  full_image.cols - template2.cols + 1;
	result_rows = full_image.rows - template2.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );
	timer->ignoreTimeSinceLastRecorded();
	matchTemplate( full_image, template2, correlation_image, CV_TM_CCORR_NORMED );
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (2)");
	Mat matched_template_display2;
	cvtColor(matched_template_map, matched_template_display2, CV_GRAY2BGR);
	Mat correlation_window2 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template2, Scalar(0,0,255) );
	timer->putTimes(display_image);
	Mat template2locations_gray;
	cvtColor(template2locations, template2locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template2locations_gray, precision, recall, accuracy, specificity, f1 );
	sprintf( results, "precision=%.2f", precision);
	location.x = 123;
	location.y = 213;
	putText( display_image, "Results (2)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	Mat correlation_display1, correlation_display2;
	cvtColor(correlation_window1, correlation_display1, CV_GRAY2BGR);
	cvtColor(correlation_window2, correlation_display2, CV_GRAY2BGR);

	Mat output1 = JoinImagesVertically(template1,"Template (1)",correlation_display1,"Correlation (1)",4);
	Mat output2 = JoinImagesVertically(output1,"",matched_template_display1,"Local maxima (1)",4);
	Mat output3 = JoinImagesVertically(template2,"Template (2)",correlation_display2,"Correlation (2)",4);
	Mat output4 = JoinImagesVertically(output3,"",matched_template_display2,"Local maxima (2)",4);
	Mat output5 = JoinImagesHorizontally( full_image, "Original Image", output2, "", 4 );
	Mat output6 = JoinImagesHorizontally( output5, "", output4, "", 4 );
	Mat output7 = JoinImagesHorizontally( output6, "", display_image, "", 4 );
	imshow( "Template matching result", output7 );
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Chamfer Matching
    Mat model_gray,model_edges,model_edges2;
	cvtColor(bicycle_model, model_gray, CV_BGR2GRAY);
	threshold(model_gray,model_edges,127,255,THRESH_BINARY);
	Mat current_frame;
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,400);  // Just in case the video has already been used.
	bicycle_video >> current_frame;
	bicycle_background = current_frame.clone();
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,500); 
	timer->reset();
	int count = 0;
	while (!current_frame.empty() && (count < 8))
    {
		Mat result_image = current_frame.clone();
		count++;
		Mat difference_frame, difference_gray, current_edges;
		absdiff(current_frame,bicycle_background,difference_frame);
		cvtColor(difference_frame, difference_gray, CV_BGR2GRAY);
		Canny(difference_gray, current_edges, 100, 200, 3); // Canny expects the single-channel difference image

		vector<vector<Point> > results;
		vector<float> costs;
		threshold(model_gray,model_edges,127,255,THRESH_BINARY);
		Mat matching_image, chamfer_image, local_minima;
		timer->ignoreTimeSinceLastRecorded();
		threshold(current_edges,current_edges,127,255,THRESH_BINARY_INV);
		distanceTransform( current_edges, chamfer_image, CV_DIST_L2 , 3);
		timer->recordTime("Chamfer Image");
		ChamferMatching( chamfer_image, model_edges, matching_image );
		timer->recordTime("Matching");
		FindLocalMinima( matching_image, local_minima, 500.0 );
		timer->recordTime("Find Minima");
		DrawMatchingTemplateRectangles( result_image, local_minima, model_edges, Scalar( 255, 0, 0 ) );
		Mat chamfer_display_image = convert_32bit_image_for_display( chamfer_image );
		Mat matching_display_image = convert_32bit_image_for_display( matching_image );
		//timer->putTimes(result_image);
		Mat current_edges_display, local_minima_display, model_edges_display, colour_matching_display_image, colour_chamfer_display_image;
		cvtColor(current_edges, current_edges_display, CV_GRAY2BGR);
		cvtColor(local_minima, local_minima_display, CV_GRAY2BGR);
		cvtColor(model_edges, model_edges_display, CV_GRAY2BGR);
		cvtColor(matching_display_image, colour_matching_display_image, CV_GRAY2BGR);
		cvtColor(chamfer_display_image, colour_chamfer_display_image, CV_GRAY2BGR);

		Mat output1 = JoinImagesVertically(current_frame,"Video Input",current_edges_display,"Edges from difference", 4);
		Mat output2 = JoinImagesVertically(output1,"",model_edges_display,"Model", 4);
		Mat output3 = JoinImagesVertically(bicycle_background,"Static Background",colour_chamfer_display_image,"Chamfer image", 4);
		Mat output4 = JoinImagesVertically(output3,"",colour_matching_display_image,"Degree of fit", 4);
		Mat output5 = JoinImagesVertically(difference_frame,"Difference",result_image,"Result", 4);
		Mat output6 = JoinImagesVertically(output5,"",local_minima_display,"Local minima", 4);
		Mat output7 = JoinImagesHorizontally( output2, "", output4, "", 4 );
		Mat output8 = JoinImagesHorizontally( output7, "", output6, "", 4 );
		imshow("Chamfer matching", output8);
		c = waitKey(1000);  // This makes the image appear on screen
		bicycle_video >> current_frame;
	}
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Cascade of Haar classifiers (most often shown for face detection).
    VideoCapture camera;
	camera.open(1);
	camera.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	camera.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    if( camera.isOpened() )
	{
		timer->reset();
		Mat current_frame;
		do {
			camera >> current_frame;
			if( current_frame.empty() )
				break;
			vector<Rect> faces;
			timer->ignoreTimeSinceLastRecorded();
			Mat gray;
			cvtColor( current_frame, gray, CV_BGR2GRAY );
			equalizeHist( gray, gray );
			cascade.detectMultiScale( gray, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
			timer->recordTime("Haar Classifier");
			for( int count = 0; count < (int)faces.size(); count++ )
				rectangle(current_frame, faces[count], cv::Scalar(255,0,0), 2);
			//timer->putTimes(current_frame);
			imshow( "Cascade of Haar Classifiers", current_frame );
			c = waitKey(10);  // This makes the image appear on screen
        } while (c == -1);
	}
}
Esempio n. 17
0
void VideoCorrect::correctImage(Mat& inputFrame, Mat& outputFrame, bool developerMode){
	
	resize(inputFrame, inputFrame, CAMERA_RESOLUTION);
	inputFrame.copyTo(img);

	//Convert to YCrCb color space
	cvtColor(img, ycbcr, CV_BGR2YCrCb);

	//Skin color thresholding
	inRange(ycbcr, Scalar(0, 150 - Cr, 100 - Cb), Scalar(255, 150 + Cr, 100 + Cb), bw);

	if(IS_INITIAL_FRAME){
		face = detectFaces(img);
		if(face.x != 0){
			lastFace = face;
		}
		else{
			outputFrame = img;
			return;
		}
		prevSize = Size(face.width/2, face.height/2);
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x + face.width/2, face.y + face.height/2), prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);
		if(face.x > 0 && face.y > 0 && face.width > 0 && face.height > 0 
			&& (face.x + face.width) < img.cols && (face.y + face.height) < img.rows){
			img(face).copyTo(bestImg);
		}
		putText(img, "Give your best pose!", Point(face.x, face.y), CV_FONT_HERSHEY_SIMPLEX, 0.4, Scalar(255,255,255,0), 1, CV_AA);
	}

	firstFrameCounter--;

	if(face.x == 0) //missing face prevention
		face = lastFace;

	//Mask the background out
	bw &= head;

	//Compute more accurate image moments after background removal
	m = moments(bw, true);
	angle = (atan((2*m.nu11)/(m.nu20-m.nu02))/2)*180/PI;
	center = Point(m.m10/m.m00,m.m01/m.m00);

	//Smooth rotation (running average)
	bufferCounter++;
	rotationBuffer[ bufferCounter % SMOOTHER_SIZE ] = angle;
	smoothAngle += (angle - rotationBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;

	//Expand borders
	copyMakeBorder( img, img, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND, BORDER_EXPAND, 
					BORDER_REPLICATE, Scalar(255,255,255,0));

	if(!IS_INITIAL_FRAME){
		//Rotate the image to correct the leaning angle
		rotateImage(img, smoothAngle);
	
		//After rotation detect faces
		face = detectFaces(img);
		if(face.x != 0)
			lastFace = face;

		//Create background mask around the face
		head = Mat::zeros(bw.rows, bw.cols, bw.type());
		ellipse(head, Point(face.x - BORDER_EXPAND + face.width/2, face.y -BORDER_EXPAND + face.height/2),
					  prevSize, 0, 0, 360, Scalar(255,255,255,0), -1, 8, 0);

		//Draw a rectangle around the face
		//rectangle(img, face, Scalar(255,255,255,0), 1, 8, 0);

		//Overlay the ideal pose
		if(replaceFace && center.x > 0 && center.y > 0){
			center = Point(face.x + face.width/2, face.y + face.width/2);
			overlayImage(img, bestImg, center, smoothSize);
		}

	} else{
		face.x += BORDER_EXPAND; //position alignment after border expansion (not necessary if we detect the face after expansion)
		face.y += BORDER_EXPAND;
	}
	
	//Smooth ideal image size (running average)
	sizeBuffer[ bufferCounter % SMOOTHER_SIZE ] = face.width;
	smoothSize += (face.width - sizeBuffer[(bufferCounter + 1) % SMOOTHER_SIZE]) / SMOOTHER_SIZE;

	//Get ROI
	center = Point(face.x + face.width/2, face.y + face.width/2);
	roi = getROI(img, center);
	if(roi.x > 0 && roi.y > 0 && roi.width > 0 && roi.height > 0 
		&& (roi.x + roi.width) < img.cols && (roi.y + roi.height) < img.rows){
		img = img(roi);
	}

	//Resize the final image
	resize(img, img, CAMERA_RESOLUTION);

	if(developerMode){

		Mat developerScreen(img.rows, 
							img.cols + 
							inputFrame.cols +
							bw.cols, CV_8UC3);

		Mat left(developerScreen, Rect(0, 0, img.size().width, img.size().height));
		img.copyTo(left);

		Mat center(developerScreen, Rect(img.cols, 0, inputFrame.cols, inputFrame.rows));
		inputFrame.copyTo(center);

		cvtColor(bw, bw, CV_GRAY2BGR);
		Mat right(developerScreen, Rect(img.size().width + inputFrame.size().width, 0, bw.size().width, bw.size().height));
		bw.copyTo(right);

		Mat rightmost(developerScreen, Rect(img.size().width + inputFrame.size().width + bw.size().width - bestImg.size().width, 0,
											bestImg.size().width, bestImg.size().height));
		bestImg.copyTo(rightmost);

		outputFrame = developerScreen;
	}
	else{
		outputFrame = img;
	}
}
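// The rotation and size smoothing above is an O(1) running average over the
// last SMOOTHER_SIZE samples: each new sample overwrites the oldest slot of a
// ring buffer, and the mean is updated by adding the new sample and removing
// the one that falls out of the window. A minimal standalone sketch of the
// same pattern:

#include <vector>

class RunningAverage {
    std::vector<double> buf;
    std::size_t i = 0;
    double mean = 0.0;
public:
    explicit RunningAverage(std::size_t n) : buf(n, 0.0) {}
    double push(double x) {
        buf[i % buf.size()] = x;
        // the slot just after the write position holds the oldest sample
        mean += (x - buf[(i + 1) % buf.size()]) / buf.size();
        ++i;
        return mean;
    }
};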
Esempio n. 18
0
bool ContourModel::update(vector<vector<Point> > contours,vector<Point2d>& originalPoints, int image_w_half)
{
    
    //step 1: find the larger contours to filter out some noise (area > thresh)
    vector<vector<Point> > largeContours;
    
    int areaThreshold = 130;
    for(int i = 0;i < (int)contours.size();i++)
    {
        vector<Point> currCont = contours.at(i);
        double area = contourArea(contours.at(i));
        if(area > areaThreshold)
        {
            largeContours.push_back(currCont);
        }
    }

    
    //step 2: for each larger contour: find the center of mass and the lane direction to group them
    vector<Point2d> mass_centers;
    vector<Point2d> line_directions;
    for(int i = 0;i < (int)largeContours.size();i++)
    {
        //calculate the line direction for each contour
        Vec4f lineParams;
        fitLine(largeContours.at(i), lineParams, CV_DIST_L2, 0, 0.01, 0.01);
        Point2d lineDirection(lineParams[0],lineParams[1]);
        line_directions.push_back(lineDirection);
        
        //calculate the mass center for each contour
        vector<Moments> contourMoments;
        Moments currMoments = moments(largeContours.at(i));
        double x_cent = currMoments.m10 / currMoments.m00;
        double y_cent = currMoments.m01 / currMoments.m00;
        Point2d mass_cent(x_cent,y_cent);
        mass_centers.push_back(mass_cent);    
    }

    //assert these vectors have same length:
    if(largeContours.size() != mass_centers.size())cout << "ERROR in ContourModel: mass_centers.size != largeContours.size()" << endl;
    if(largeContours.size() != line_directions.size())cout << "ERROR in ContourModel: line_directions.size != largeContours.size()" << endl;

    //step 3: create the "mergeList": store for each contour whether it wants to merge with another one
    vector<vector<int> > mergelist;
    //merge contours based on center of mass and line direction
    for(int i = 0;i < (int)largeContours.size();i++)
    {
        vector<int> mergeWishes;
        
        Point2d currCenter = mass_centers.at(i);
        Point2d currDirection = line_directions.at(i);
        
        for(int j = i+1;j < (int)largeContours.size();j++)
        {
            Point2d compCenter = mass_centers.at(j);
            Point2d compDirection = line_directions.at(j);
            
            bool wantMerge = mergeContours(currCenter, currDirection, compCenter, compDirection);
            
            if(wantMerge)mergeWishes.push_back(j);
        }
        
        mergelist.push_back(mergeWishes);
    }

    //step 4: use the mergeList to create the final_mergelist which looks as follows:
    //[ [0,2,5] [3] [1] [4,6]] telling which contours should be merged together
    
    vector<vector<int> > final_mergelist;
    for(int i = 0;i < (int)largeContours.size();i++)
    {
        vector<int> temp;
        temp.push_back(i);
        final_mergelist.push_back(temp);
    }
    
    for(int i = 0;i < (int)largeContours.size();i++)
    {
        vector<int>* containerToPushTo = NULL;
        
        //step 1: find the container that contour i is in - this always succeeds, so containerToPushTo won't stay NULL
        for(int j = 0;j < (int)final_mergelist.size();j++)
        {
            vector<int>* currContainer;
            currContainer = &final_mergelist.at(j);
            for(int k = 0;k < (int)final_mergelist.at(j).size();k++)
            {
                if(final_mergelist.at(j).at(k) == i)
                {
                    containerToPushTo = currContainer;
                }
            }
        }
        
        //step 2: for each element to push: make sure it appears in the container
        for(int j = 0;j < (int)mergelist.at(i).size();j++)
        {
            int elemToMerge = mergelist.at(i).at(j);
            
            //if elemToMerge already appears in containerToPushTo => do nothing
            bool alreadyInContainer = false;
            for(int k = 0;k < (int)containerToPushTo->size();k++)
            {
                if(containerToPushTo->at(k) == elemToMerge)
                    alreadyInContainer = true;
            }
            
            //not inside: push the element and delete it from the old vector it was in
            if(!alreadyInContainer)
            {
                
                //delete it from the old container!!
                for(int k = 0;k < (int)final_mergelist.size();k++)
                {
                    for(int l = 0;l < (int)final_mergelist.at(k).size();l++)
                    {
                        //DEBUG IFS - ERASE LATER
                        if(k < 0 || k >= (int)final_mergelist.size())cout << "OVERFLOW IN 159::ContourModel" << endl;
                        if(l < 0 || l >= (int)final_mergelist.at(k).size())cout << "OVERFLOW IN 160::ContourModel" << endl;

                        if(final_mergelist.at(k).at(l) == elemToMerge)
                        {
                            //DEBUG IF- ERASE LATER
                            if(l < 0 || l >= (int)final_mergelist.at(k).size()) cout << "ERROR ContourModel 162" << endl;
                            final_mergelist.at(k).erase(final_mergelist.at(k).begin()+l);
                        }
                    }
                }
                
                //add it in the new container
                containerToPushTo->push_back(elemToMerge);
            }
            
        }
        
    }
    
    
    //step 5: merge the contours together
    vector< vector<vector<Point> > > mergedContours;
    
    for(int i = 0;i < (int)final_mergelist.size();i++)
    {
        vector<vector<Point> > currGrouping;
        for(int j = 0;j < (int)final_mergelist.at(i).size();j++)
        {
            vector<Point> currContour = largeContours.at(final_mergelist.at(i).at(j));
            currGrouping.push_back(currContour);
        }
        if(currGrouping.size() > 0)mergedContours.push_back(currGrouping);
        
    }


    //TRY TO FIND THE MIDDLE LANE

    vector<vector<Point> >          singleContours;
    vector<vector<vector<Point> > > multipleContours;

    for(int i = 0;i < (int)mergedContours.size();i++)
    {        
        vector<vector<Point> > currContGroup = mergedContours.at(i);
        if(currContGroup.size() == 1) singleContours.push_back(currContGroup.at(0));
        else if(currContGroup.size() > 1) multipleContours.push_back(currContGroup);                
    }

    //only in this situation can the middle lane extraction be applied; otherwise the old procedure is used
    if(multipleContours.size() == 1 && singleContours.size() <= 2 && singleContours.size() > 0)
    {

        //sort single contours by area
        std::sort(singleContours.begin(),singleContours.end(),acompareCont);
        vector<Point> largestSingleContour = singleContours.at(singleContours.size()-1);
        double areaLargestSingle = contourArea(largestSingleContour);

        vector<vector<Point> > middleContour = multipleContours.at(0);
        double areaMiddle = 0;
        bool validMid = true;
        for(int i = 0;i < (int)middleContour.size();i++)
        {
            double areaCurr = contourArea(middleContour.at(i));
            if(areaCurr > areaLargestSingle/2.0){
                validMid = false;
            }
            areaMiddle += contourArea(middleContour.at(i));
        }


        //if both contours have a certain size
        if(areaLargestSingle > 120 && areaMiddle > 120)
        {
            //MIDDLE LANE AND OTHER LANE FOUND => RETURN THE ESTIMATE

            //first argument will be the middle lane
            //second argument will be the other larger lane
            vector<vector<Point2d> > nicelyGroupedPoints;

            //1) --- MIDDLE LANE ---
            vector<Point2d> temp_result;
            for(int i = 0;i < (int)middleContour.size();i++)
            {
                vector<Point> currCont = middleContour.at(i);
                Rect bound = boundingRect(currCont);

                //visit every point in the bounding rect
                for(int y = bound.y;y < bound.y+bound.height;y++)
                {
                    for(int x = bound.x;x < bound.x+bound.width;x++)
                    {
                        if(pointPolygonTest(currCont, Point(x,y), false) >= 0)
                        {
                            temp_result.push_back(Point2d(x-image_w_half,y));
                        }
                    }
                }

            }

            nicelyGroupedPoints.push_back(temp_result);

            //2) --- OTHER LANE ---

            vector<Point2d> temp_result2;

            Rect bound = boundingRect(largestSingleContour);

            //visit every point in the bounding rect
            for(int y = bound.y;y < bound.y+bound.height;y++)
            {
                for(int x = bound.x;x < bound.x+bound.width;x++)
                {
                    if(pointPolygonTest(largestSingleContour, Point(x,y), false) >= 0)
                    {
                        temp_result2.push_back(Point2d(x-image_w_half,y));
                    }
                }
            }


            if(validMid)
            {
                nicelyGroupedPoints.push_back(temp_result2);
                points = nicelyGroupedPoints;
                return true; //middle lane estimate provided
            }
        }
    }

    //MIDDLE LANE WAS NOT FOUND



    //step 6: get the final result: the grouped points matching the contours
    //need to perform an inside-contour check within the bounding rectangle of the contour for
    //each point in the bounding rectangle
    vector<vector<Point2d> > nicelyGroupedPoints;

    for(int i = 0;i < (int)mergedContours.size();i++)
    {
        vector<Point2d> temp_result;
        for(int j = 0;j < (int)mergedContours.at(i).size();j++)
        {
            vector<Point> currContour = mergedContours.at(i).at(j);
            Rect bound = boundingRect(currContour);

            //visit every point in the bounding rect
            for(int y = bound.y;y < bound.y+bound.height;y++)
            {
                for(int x = bound.x;x < bound.x+bound.width;x++)
                {
                    if(pointPolygonTest(currContour, Point(x,y), false) >= 0)
                    {
                        temp_result.push_back(Point2d(x-image_w_half,y));
                    }
                }
            }
        }

        if(temp_result.size() > 0)
        {
            nicelyGroupedPoints.push_back(temp_result);
        }
    }


    /*
    //step 6 (alternative): get the final result: the grouped points matching the contours
    //need to perform a inside contour check for the input points if in boundary rectangle of the contour
    vector<vector<Point2d> > nicelyGroupedPoints;

    for(int i = 0;i < mergedContours.size();i++)
    {

        vector<Point2d> temp_result;
        for(int j = 0;j < mergedContours.at(i).size();j++)
        {
            vector<Point> currContour = mergedContours.at(i).at(j);
            Rect bound = boundingRect(currContour);

            for(int k = 0;k < originalPoints.size();k++)
            {
                //check if within the contour:
                if(pointPolygonTest(currContour, originalPoints.at(k), false) >= 0)
                {
                    temp_result.push_back(Point2d(originalPoints.at(k).x-image_w_half, originalPoints.at(k).y));
                }
            }
        }

        if(temp_result.size() > 0)
        {
            nicelyGroupedPoints.push_back(temp_result);
        }
    }

    */


    points = nicelyGroupedPoints;
    return false; //everything as usual, no further information
}
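// mergeContours() is referenced above but not part of this listing. A
// plausible minimal sketch of such a predicate (an assumption, not the
// original code): merge two contours when their mass centers are close and
// their fitted line directions are nearly parallel. Both thresholds are
// illustrative.

#include <cmath>

bool mergeContours(const cv::Point2d &c1, const cv::Point2d &d1,
                   const cv::Point2d &c2, const cv::Point2d &d2)
{
    const double maxCenterDist = 50.0; // pixels, illustrative
    const double minAbsCosine = 0.98;  // roughly 11 degrees, illustrative
    double dist = std::hypot(c1.x - c2.x, c1.y - c2.y);
    // fitLine() returns unit direction vectors, so the dot product is cos(angle)
    double cosAngle = std::abs(d1.x * d2.x + d1.y * d2.y);
    return dist < maxCenterDist && cosAngle > minAbsCosine;
}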
Esempio n. 19
0
Mat FindContour::curveSmooth(Mat &contourImg,
                int WIN, // half window size for laplacian smoothing
                vector<Point> &border,
                vector<Point> &smooth,
                /*Point cntoid*/vector<Point> &convHull )
{
//    if(contourImg.type() != CV_8UC1){
//        cvtColor( contourImg, contourImg, CV_BGR2GRAY );
//    }
    double width = contourImg.cols;
    double height = contourImg.rows;

    Moments mu = moments(convHull);
//    Moments mu = moments(border);
    Point cntoid = Point2f(mu.m10/mu.m00/* + rect.x*/, mu.m01/mu.m00/* +rect.y*/);

    double d_min = max(width, height)*max(width, height);
    vector<polarPoint> border_polar;
    for (unsigned int n = 0; n < border.size(); n++)
    {
        //find the polar coordinates of the border;
        border_polar.push_back(cartesianToPolar(cntoid, border[n]));

        //find the nearest point to the center on the border
        double d = dist(cntoid, border[n]);
        if(d < d_min){
            d_min = d;
        }
    }
    d_min = sqrt(d_min);

    // sort border_polar by theta
    sort(border_polar.begin(), border_polar.end(), sortByTheta);

    // Laplacian smoothing
    unsigned int border_size = border_polar.size();
    for(unsigned int n = 0; n < border_size; n++){
        //cout << border_polar[n].r << " " << border_polar[n].theta << "  ";

        double avg = 0;
        for(int w = -WIN; w < WIN; w++){
            unsigned int pos = (w + n + border_size) % border_size; // wrap the window index around the closed border
            //cout << " pos " << pos << "  ";
            avg += border_polar[pos].r;
        }
        avg = avg/WIN/2;
        polarPoint polar;
        polar.r = avg;
        polar.theta = border_polar[n].theta;
        //cout << polar.r << " " << polar.theta << " ";
        Point p = polarToCartesian(cntoid, polar);
        //circle(color, p, 1, Scalar(255, 255, 0));
        smooth.push_back(p);
        //cout << p.x << " " << p.y << "\n";
    }

    Mat smoothCircle = Mat::zeros(height, width, CV_8UC1);
    fillConvexPoly(smoothCircle, smooth, Scalar(255));
//    fillPoly(smoothCircle, smooth, Scalar(255));
    //imshow("smoothCircle", smoothCircle);

    return smoothCircle;
}
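// polarPoint, cartesianToPolar(), polarToCartesian() and sortByTheta() are
// used above but not shown in this listing. A minimal sketch of plausible
// implementations (assumptions, not the original code): polar coordinates are
// taken relative to the centroid, with r the radius and theta from atan2.

#include <cmath>

struct polarPoint { double r; double theta; };

polarPoint cartesianToPolar(const cv::Point &center, const cv::Point &p)
{
    double dx = p.x - center.x, dy = p.y - center.y;
    polarPoint pol;
    pol.r = std::sqrt(dx*dx + dy*dy);
    pol.theta = std::atan2(dy, dx);
    return pol;
}

cv::Point polarToCartesian(const cv::Point &center, const polarPoint &pol)
{
    return cv::Point(cvRound(center.x + pol.r * std::cos(pol.theta)),
                     cvRound(center.y + pol.r * std::sin(pol.theta)));
}

bool sortByTheta(const polarPoint &a, const polarPoint &b) { return a.theta < b.theta; }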
Esempio n. 20
0
//Initialization thread
void *drawingAndParam(void * arg)
{
	string winParametrage = "Thresholded";
	string winDetected = "Parametrages";
	char key;
	drawing = false;
	onDrawing = true;
	pthread_mutex_init(&mutexVideo, NULL);
#if output_video == ov_remote_ffmpeg
	int errorcode = avformat_open_input(&pFormatCtx, "tcp://192.168.1.1:5555", NULL, NULL);
	if (errorcode < 0) {
		cout << "ERREUR CAMERA DRONE!!!" << errorcode;
		return 0;
	}
	avformat_find_stream_info(pFormatCtx, NULL);
	av_dump_format(pFormatCtx, 0, "tcp://192.168.1.1:5555", 0);
	pCodecCtx = pFormatCtx->streams[0]->codec;
	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		cout << "ERREUR avcodec_find_decoder!!!";
		return 0;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		cout << "ERREUR avcodec_open2!!!";
		return 0;
	}
	//pFrame = av_frame_alloc();
	//pFrameBGR = av_frame_alloc();
	pFrame = avcodec_alloc_frame();
	pFrameBGR = avcodec_alloc_frame();
	bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));
	avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
	img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
	if (!img) {
		cout << "ERREUR PAS D'IMAGE!!!";
		return 0;
	}

	pthread_t ii;
	pthread_create(&ii, NULL, getimg, NULL);

#else	
	VideoCapture cap(0); //capture video webcam

#endif
	HH=179;LS=1;HS=255;LV=1;HV=255;LH=1;
	namedWindow(winDetected, CV_WINDOW_NORMAL);
	Mat frame;
	setMouseCallback(winDetected, MouseCallBack, NULL);
	while(true)
	{	
		if(onDrawing) //While the user has not started the selection
		{
			#if output_video != ov_remote_ffmpeg
				bool bSuccess = cap.read(frame); // Grab a new frame
			if (!bSuccess) {
				cout << "Impossible de lire le flux video" << endl;
				break;
			}
			#else
				pthread_mutex_lock(&mutexVideo);
				memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
				pthread_mutex_unlock(&mutexVideo);
				frame = cv::cvarrToMat(img, true);
			#endif
		imshow(winDetected, frame);
		}
		if(!onDrawing && !drawing) //Live display of the user's selection in progress
		{
			Mat tmpFrame=frame.clone();
			rectangle(tmpFrame, rec, CV_RGB(51,156,204),1,8,0);
			imshow(winDetected, tmpFrame);
		}
		if(drawing) //The user has finished selecting
		{
			//cible Ball(1);
			namedWindow(winParametrage, CV_WINDOW_NORMAL);
			setMouseCallback(winDetected, NULL, NULL);	
			rectangle(frame, rec, CV_RGB(51,156,204),2,8,0);
			imshow(winDetected, frame);
			Mat selection = frame(rec);
			Ball.setPicture(selection);
			while(key != 'q')
			{
				//Trackbar for choosing the color (hue)
				createTrackbar("LowH", winParametrage, &LH, 179); //Hue (0 - 179)
				createTrackbar("HighH", winParametrage, &HH, 179);
				//Trackbar for saturation, compared against white
				createTrackbar("LowS", winParametrage, &LS, 255); //Saturation (0 - 255)
				createTrackbar("HighS", winParametrage, &HS, 255);
				//Trackbar for brightness, compared against black
				createTrackbar("LowV", winParametrage, &LV, 255);//Value (0 - 255)
				createTrackbar("HighV", winParametrage, &HV, 255);
				Mat imgHSV;

				cvtColor(selection, imgHSV, COLOR_BGR2HSV); //Convert from BGR to HSV

				Mat imgDetection;

				inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); //Black out the parts outside the user-chosen color interval

				//Remove noise
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
				erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

				imshow(winParametrage, imgDetection);

				//Compute the "distance" to the target. It is used as a threshold.
				Moments position;
				position = moments(imgDetection);
				Ball.lastdZone = position.m00;

				key = waitKey(10);
			}
			
			//Extract the interest points from the user's selection
			Mat graySelect;
			int minHessian = 800;
			cvtColor(selection, graySelect, COLOR_BGR2GRAY);
			Ptr<SURF> detector = SURF::create(minHessian);
			vector<KeyPoint> KP;
			detector->detect(graySelect, KP);
			Mat KPimg;
			drawKeypoints(graySelect, KP, KPimg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
			Mat desc;
			Ptr<SURF> extractor = SURF::create();
			extractor->compute(graySelect, KP, desc);
			Ball.setimgGray(graySelect);
			Ball.setKP(KP);
			Ball.setDesc(desc);
			break;
		}
		key = waitKey(10);
	}
	//Initialization done: close all windows and move on to tracking
	destroyAllWindows();
#if output_video != ov_remote_ffmpeg
	cap.release();
#endif
	return NULL; // a pthread start routine must return a value
}
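// The erode/dilate pairs used above form a morphological opening (erode then
// dilate, removing small white specks) followed by a closing (dilate then
// erode, filling small holes). A sketch of the equivalent, more compact
// morphologyEx calls, for an 8-bit mask like imgDetection:

void denoiseMask(cv::Mat &mask)
{
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
    cv::morphologyEx(mask, mask, cv::MORPH_OPEN, kernel);  // erode then dilate
    cv::morphologyEx(mask, mask, cv::MORPH_CLOSE, kernel); // dilate then erode
}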
Esempio n. 21
0
void FindContour::singleCellDetection(const Mat &img, vector<Point> &cir_org,
                                      Mat &dispImg1, Mat &dispImg2,
                                      int &area, int &perimeter,
                                      Point2f &ctroid, float &shape,
                                      Mat &cell_alpha, // only the area inside cell (without background)
                                      vector<Point> &smooth_contour_curve, // relative position (without counting rect.x and rect.y)
                                      vector<Point> &smooth_contour_curve_abs, // absolut position
                                      Mat &blebsImg,
                                      Rect &rectangle,
                                      //vector<int> &blebs,
                                      int &frameNum)
{
    frame = &img;

    vector<Point> cir; //***global coordinates of circle***
    //cout << "[";
    for(unsigned int i = 0; i < cir_org.size(); i++){
        cir.push_back(Point(cir_org[i].x / scale, cir_org[i].y / scale));
        //cout << int(cir_org[i].x / scale) << ", " << int(cir_org[i].y / scale) << "; ";
    }
    //cout << "]" << endl;

    //enlarge the bounding rect by adding a margin (e) to it
    rect = enlargeRect(boundingRect(Mat(cir)), 5, img.cols, img.rows);
    //cout << "rect_roi " << boundingRect(Mat(cir)) << "\n";
    //cout << "enlarged rect " << rect << endl;

    dispImg1 = (*frame)(rect).clone();

    Mat sub; //*** the rectangle region of ROI (Gray) ***
    cv::cvtColor(dispImg1, sub, CV_RGB2GRAY);
    int width = sub.cols;
    int height = sub.rows;

    rectangle = rect;

//    Mat canny;
//    CannyWithBlur(sub, canny);
//    imshow("canny", canny);

    vector<Point> circle_ROI; //***local coordinates of circle***
    for (unsigned int i = 0; i < cir.size(); i++){
        Point p = Point(cir[i].x - rect.x, cir[i].y - rect.y);
        circle_ROI.push_back(p);
    }

    Mat adapThreshImg1 = Mat::zeros(height, width, sub.type());
    //adaptive thresholding of the sub region (roi rect)
    adaptiveThreshold(sub, adapThreshImg1, 255.0, ADAPTIVE_THRESH_GAUSSIAN_C,
                          CV_THRESH_BINARY_INV, blockSize, constValue);
    //imshow("adapThreshImg1", adapThreshImg1);

    // dilation and erosion
    Mat dilerod;
    dilErod(adapThreshImg1, dilerod, dilSize);

    //display image 2 -- dilerod of adaptive threshold image
    GaussianBlur(dilerod, dilerod, Size(3, 3), 2, 2 );

    //mask for filtering out the cell of interest
    Mat mask_conv = Mat::zeros(height, width, CV_8UC1);
    fillConvexPoly(mask_conv, circle_ROI, Scalar(255));
    //imshow("mask_before", mask_conv);

    //dilate the mask -> region grows
    Mat mask_conv_dil;
    Mat element = getStructuringElement( MORPH_ELLIPSE,
                                         Size( 2*dilSize+1, 2*dilSize+1 ),
                                         Point(dilSize,dilSize) );
    dilate(mask_conv, mask_conv_dil, element);
    //imshow("mask_dil", mask_conv_dil);

    //bitwise AND on mask and dilerod
    bitwise_and(mask_conv_dil, dilerod, dispImg2);

    // findcontours
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    unsigned int largest_contour_index;
    dilErodContours(dispImg2, contours, hierarchy, largest_contour_index, perimeter, dispImg1);

    // find the area of the cell by counting the white area inside the largest contour
    Mat cellArea = Mat::zeros(height, width, CV_8UC1);
    drawContours(cellArea, contours, largest_contour_index, Scalar(255), -1, 8, hierarchy, 0, Point() );
    //imshow("cellArea", cellArea);
    area = countNonZero(cellArea);

    //cout << "frame " << frameNum << "\n";
    //cout << contours[largest_contour_index] << endl;

    //renew circle points as the convex hull
    vector<Point> convHull;
    convexHull(contours[largest_contour_index], convHull);

    // find the centroid of the contour
    Moments mu = moments(contours[largest_contour_index]);
    ctroid = Point2f(mu.m10/mu.m00 + rect.x, mu.m01/mu.m00 + rect.y);

    // find the shape of the cell by the largest contour and centroid
    shape = findShape(ctroid, contours[largest_contour_index]);

    ////---- draw largest contour start ----
    //draw the largest contour
    Mat borderImg = Mat::zeros(height, width, CV_8UC1);
    drawContours(borderImg, contours, largest_contour_index, Scalar(255), 1, 8, hierarchy, 0, Point());
    //QString cellFileName0 = "border" + QString::number(frameNum) + ".png";
    //imwrite(cellFileName0.toStdString(), borderImg);


    Mat cell;
    bitwise_and(cellArea, sub, cell);
    //cell_alpha = createAlphaMat(cell);  // cell image with exactly the contour detected
    //vector<int> compression_params;
    //compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
    //compression_params.push_back(9);
//    QString cellFileName1 = "cell" + QString::number(frameNum) + ".png";
//    imwrite(cellFileName1.toStdString(), cell_alpha, compression_params);
    ////---- draw largest contour end ----

    // find the number and the sizes of blebs of the cell
    Mat smooth;
    vector<Point> smoothCurve;
    int WIN = 25;
    smooth = curveSmooth(borderImg, WIN, contours[largest_contour_index], smoothCurve, convHull);
    //smooth = curveSmooth(borderImg, WIN, contours[largest_contour_index], smoothCurve, ctroid/*Point(ctroid.x, ctroid.y)*/);
    //drawPointVectors(dispImg1, smoothCurve, 1, Scalar(159, 120, 28));


    Mat smooth_contour;
    int w = 10;
    smooth_contour = curveSmooth(borderImg, w, contours[largest_contour_index], smooth_contour_curve, convHull);
    //smooth_contour = curveSmooth(borderImg, w, contours[largest_contour_index], smooth_contour_curve, ctroid/*Point(ctroid.x, ctroid.y)*/);
    //imshow("smooth_contour", smooth_contour);

    for(unsigned int i = 0; i < smooth_contour_curve.size(); i++){
         Point p(smooth_contour_curve[i].x + rect.x, smooth_contour_curve[i].y + rect.y);
         smooth_contour_curve_abs.push_back(p);
    }

//    cout << "ctroid X " << ctroid.x << " Y " << ctroid.y << endl;
////    for(unsigned int i = 0; i < contours[largest_contour_index].size(); i++)
////        cout << "(" << contours[largest_contour_index][i].x + rect.x << ", " << contours[largest_contour_index][i].y + rect.y << ") ";
////    cout << endl;
//    for(unsigned int i = 0; i < smooth_contour_curve_abs.size(); i++)
//        cout << "(" << smooth_contour_curve_abs[i].x << ", " << smooth_contour_curve_abs[i].y << ") ";
//    cout << endl;

    //cout << mask_conv_dil.type() << " " << sub.type() << endl;
    Mat cell_convex;
    bitwise_and(smooth_contour, sub, cell_convex);
    cell_alpha = createAlphaMat(cell_convex);
//    imshow("cell_convex_contour", cell_alpha);
    dispImg2 = cell_convex.clone();

    //change dispImg2 from gray to rgb for displaying
    cvtColor(dispImg2, dispImg2, CV_GRAY2RGB);

    bitwise_not(smooth, smooth);
    //Mat blebsImg;
    bitwise_and(smooth, cellArea, blebsImg);
//    imshow("blebs", blebsImg);
//    QString cellFileName2 = "blebs" + QString::number(frameNum) + ".png";
//    imwrite(cellFileName2.toStdString(), blebs);

    //QString cellFileName2 = "dispImg1" + QString::number(frameNum) + ".png";
    //imwrite(cellFileName2.toStdString(), dispImg1);

    cir_org.clear();
    for(unsigned int i = 0; i < convHull.size(); i++)
        cir_org.push_back(Point((convHull[i].x + rect.x)*scale, (convHull[i].y + rect.y)*scale));
}
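
The centroid arithmetic above is the standard moment-based center of mass: m10/m00 and m01/m00, shifted back by the ROI origin. A minimal, self-contained sketch of that idiom follows; the binary input and ROI are assumptions, not part of the code above.

#include <opencv2/opencv.hpp>

// Sketch: moment-based centroid of the largest contour in a binary ROI,
// offset back into full-image coordinates as done above (inputs assumed).
cv::Point2f contourCentroid(const cv::Mat &binary, const cv::Rect &roi)
{
    std::vector<std::vector<cv::Point> > contours;
    cv::Mat work = binary.clone();             // findContours modifies its input
    cv::findContours(work, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    if (contours.empty())
        return cv::Point2f(-1.f, -1.f);

    size_t largest = 0;
    double maxArea = 0.0;
    for (size_t i = 0; i < contours.size(); i++) {
        double a = cv::contourArea(contours[i]);
        if (a > maxArea) { maxArea = a; largest = i; }
    }

    cv::Moments mu = cv::moments(contours[largest]);
    // m00 is the area; m10/m00 and m01/m00 give the center of mass.
    return cv::Point2f((float)(mu.m10 / mu.m00 + roi.x),
                       (float)(mu.m01 / mu.m00 + roi.y));
}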
Example no. 22
void *opencv(void * args)
{
	/*
	   START OF OPENCV PROCESSING
	 */
	Mat imgHSV;

	cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); // Convert from BGR to HSV

	inRange(imgHSV, Scalar(LH, LS, LV), Scalar(HH, HS, HV), imgDetection); // Black out the parts outside our interval for the ball

	// Remove small background specks (morphological opening)
	erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
	dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

	// Fill small holes in the detected blob (morphological closing)
	dilate(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
	erode(imgDetection, imgDetection, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

	int i, nlabels;
	Rect box;
	int maxArea=0;
	Mat labels;
	Mat centroids;
	Mat stats;
	
	// Compute the connected components of the image
	nlabels=connectedComponentsWithStats(imgDetection, labels, stats, centroids, 4, CV_32S);

	// Find the largest connected component (label 0 is the background)
	for(i=1; i<nlabels; i++)
	{
		int *row = stats.ptr<int>(i); // stats row for component i
		//printf("i : %d, area %d vs max %d\n", i, row[CC_STAT_AREA], maxArea);
		if(row[CC_STAT_AREA]>maxArea)
		{
			box = Rect(row[CC_STAT_LEFT], row[CC_STAT_TOP], row[CC_STAT_WIDTH], row[CC_STAT_HEIGHT]);
			maxArea=row[CC_STAT_AREA];
		}
	}

	Moments position;
	//cout << maxArea << endl << (int)(Ball.lastdZone*0.3) << endl;
	// If the connected component is not large enough, it is not the object
	if(maxArea>200)//(int)(0.3*Ball.lastdZone))
	{
		Ball.setFoundCV(true);
		rectangle(imgOriginal, box, Scalar(0,255,0), 4, 8, 0);

		// Compute the object's position
		position = moments(imgDetection(box));

		double y = position.m01; // first-order moment in y
		double x = position.m10; // first-order moment in x
		double dZone = position.m00; // zeroth moment (area), used as a depth proxy
		//cout << "dZone " << dZone << endl << "LdZone " << Ball.lastdZone << endl;

		posX = x / dZone;
		posY = y / dZone;
		posX+=box.x;
		posY+=box.y;
		

		int posZ=0;
		if(dZone>Ball.lastdZone+Ball.lastdZone*0.2)
		{
			posZ=-1; // Too close to the object: move back.
		}
		else if(dZone > Ball.lastdZone-Ball.lastdZone*0.2 && dZone < Ball.lastdZone+Ball.lastdZone*0.2)
		{
			posZ=0; // We are at the right distance from the object
		}
		else
		{
			posZ=1; // Too far from the object: move forward.
		}
		 Ball.setCurrentCV((float)posX/fSize.width*100,(float)posY/fSize.height*100, (float)posZ);
	}
	else
	{
		if(activate) { // We get here when the detected area is too small or nothing is detected.
			//AtCmd::sendMovement(0, 0, 0, 0, 0); // CHANGE
		}
		Ball.setFoundCV(false);
	}

	/*
	   END OF OPENCV PROCESSING
	 */
	return NULL;
}
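
connectedComponentsWithStats packs each label's statistics into one row of the stats matrix, with label 0 reserved for the background. A hedged sketch of the same largest-component search, using the named CC_STAT_* indices instead of a raw row pointer:

#include <opencv2/opencv.hpp>

// Sketch: find the largest foreground component of a binary mask.
// Returns its bounding box, or an empty Rect if there is none.
cv::Rect largestComponent(const cv::Mat &mask)
{
    cv::Mat labels, stats, centroids;
    int nlabels = cv::connectedComponentsWithStats(mask, labels, stats, centroids, 4, CV_32S);

    cv::Rect best;
    int bestArea = 0;
    for (int i = 1; i < nlabels; i++) {        // label 0 is the background
        int area = stats.at<int>(i, cv::CC_STAT_AREA);
        if (area > bestArea) {
            bestArea = area;
            best = cv::Rect(stats.at<int>(i, cv::CC_STAT_LEFT),
                            stats.at<int>(i, cv::CC_STAT_TOP),
                            stats.at<int>(i, cv::CC_STAT_WIDTH),
                            stats.at<int>(i, cv::CC_STAT_HEIGHT));
        }
    }
    return best;
}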
/** @function thresh_callback */
void thresh_callback(Mat src_gray, Mat& drawing, double scale, Size& ssize)
{
	RNG rng(12345);
	int thresh = 100;
	ofstream fout("largest_contour_area.txt");

	Mat canny_output;
	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;

	double max_contour_area(0.0);
	int largest_contour_index(0);

	/// Detect edges using canny
	Canny(src_gray, canny_output, thresh, thresh * 2, 3);
	/// Find contours
	findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

	/// Get the moments
	vector<Moments> mu(contours.size());
	for (size_t i = 0; i < contours.size(); i++)
	{
		mu[i] = moments(contours[i], false);
		//cout << "# of contour points: " << contours[i].size() << endl;
		//for (size_t j = 0; j < contours[i].size(); j++)
		//	cout << "Point(x,y)=" << i << " j " << j << " " << contours[i][j] << endl;
	}

	///  Get the mass centers:
	vector<Point2f> mc(contours.size());
	for (size_t i = 0; i < contours.size(); i++)
	{
		mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
	}

	//----------- Find the convex hull object for each contour
	vector<vector<Point> > hull(contours.size());
	for (size_t i = 0; i < contours.size(); i++){
		convexHull(Mat(contours[i]), hull[i], false);
	}

	//------------ Draw contours
	drawing = Mat::zeros(canny_output.size(), CV_8UC3);
	//Size ssize;

	ssize = Size((int)(drawing.size().width * scale), (int)(drawing.size().height * scale));
	// the destination image size, e.g. 100x100

	// Calculate the area with the moments 00 and compare with the result of the OpenCV function
	//printf("\t Info: Area and Contour Length \n");

	//cout << "contours.size() " << contours.size() << endl;
	double contour_Area(0.0), arc_Length(0.0);

	for (size_t i = 0; i < contours.size(); i++)
	{
		contour_Area = (double)contourArea(contours[i]);
		arc_Length = (double)arcLength(contours[i], true);

		//cout << "contourArea [" << i << "] " << ": Moment " << mu[i].m00 
		//	 << " OpenCV " << contour_Area << " arcLength " << arc_Length << endl;

		//cout << "contour_Area " << contour_Area << " " << endl;
		if (contour_Area > max_contour_area){
			max_contour_area = contour_Area;
			largest_contour_index = (int)i;
		}

		//------- draw all contour ---------------
		//Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		//drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
		//circle(drawing, mc[i], 4, color, -1, 8, 0);

	}
	//------- draw largest contour ---------------
	if (contours.size() > 0){
		Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		drawContours(drawing, contours, largest_contour_index, color, 1, 8, hierarchy, 0, Point());
		circle(drawing, mc[largest_contour_index], 4, color, -1, 8, 0);
		drawContours(drawing, hull, largest_contour_index, color, 1, 8, vector<Vec4i>(), 0, Point());
	}
	fout << max_contour_area << endl;
	cout << "max_contour_area " << max_contour_area << endl;

}
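
thresh_callback fills in both the drawing and the scaled output size, so a caller still has to resize and display. A possible call site, with a placeholder file name and an arbitrary scale factor:

#include <opencv2/opencv.hpp>

// Sketch: a possible call site for thresh_callback above
// (file name and scale factor are placeholders).
void runThreshDemo()
{
    cv::Mat src = cv::imread("input.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (src.empty())
        return;
    cv::blur(src, src, cv::Size(3, 3));        // smooth before Canny

    cv::Mat drawing;
    cv::Size ssize;
    thresh_callback(src, drawing, 0.5, ssize); // fills drawing and ssize

    cv::resize(drawing, drawing, ssize);       // shrink to the size the callback computed
    cv::imshow("largest contour", drawing);
    cv::waitKey(0);
}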
Example no. 24
int main()
{
	cv::Mat imCalibColor;	
	cv::Mat imCalibGray;	
	cv::vector<cv::vector<cv::Point> > contours;
	cv::vector<cv::Vec4i> hierarchy;
	cv::vector<cv::Point2f> pointQR;
	cv::Mat imCalibNext;
	cv::Mat imQR;
	cv::vector<cv::Mat> tabQR;
	/*cv::vector<cv::Point2f> corners1;
	cv::vector<cv::Point2f> corners2;
	cv::vector<cv::Point2f> corners3;
	cv::vector<cv::Point2f> corners4;
	cv::vector<cv::Point2f> corners5;*/

	double qualityLevel = 0.01;
	double minDistance = 10;
	int blockSize = 3;
	bool useHarrisDetector = false;
	double k = 0.04;
	int maxCorners = 600;

	int A = 0, B= 0, C= 0;
	char key;
	int mark;
	bool patternFound = false;
	
	cv::VideoCapture vcap("../rsc/capture2.avi");

	for (int i = 1; i < 5; i++)
	{
		std::ostringstream oss;
		oss << "../rsc/QrCodes/QR" << i << ".jpg";
		imQR = cv::imread(oss.str());
		cv::cvtColor(imQR, imQR, CV_BGR2GRAY);
		std::cout<< "Bouh!!!!!!" << std::endl;
		tabQR.push_back(imQR);
	}

	do
	{
		while(imCalibColor.empty())
		{
			vcap >> imCalibColor;
		}
		vcap >> imCalibColor;

		cv::Mat edges(imCalibColor.size(),CV_MAKETYPE(imCalibColor.depth(), 1));
		cv::cvtColor(imCalibColor, imCalibGray, CV_BGR2GRAY);
		Canny(imCalibGray, edges, 100 , 200, 3);

		cv::findContours( edges, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
		
		cv::imshow("pointInteret", imCalibColor);

		mark = 0;

		cv::vector<cv::Moments> mu(contours.size());
  		cv::vector<cv::Point2f> mc(contours.size());

		for( int i = 0; i < contours.size(); i++ )
		{	
			mu[i] = moments( contours[i], false ); 
			mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
		}

		// A QR finder pattern shows up as a deep stack of nested contours;
		// count each contour's nesting depth via the hierarchy's child links.
		for( int i = 0; i < contours.size(); i++ )
		{
			int k=i;
			int c=0;

			while(hierarchy[k][2] != -1)
			{
				k = hierarchy[k][2] ;
				c = c+1;
			}

			if (c >= 5)
			{	
				if (mark == 0)		A = i;
				else if  (mark == 1)	B = i;		// i.e., A is already found, assign current contour to B
				else if  (mark == 2)	C = i;		// i.e., A and B are already found, assign current contour to C
				mark = mark + 1 ;
			}
		} 

		if (A !=0 && B !=0 && C!=0)
		{

			cv::Mat imagecropped = imCalibColor;
			cv::Rect ROI(280/*pointQR[0].x*/, 260/*pointQR[0].y*/, 253, 218);
			cv::Mat croppedRef(imagecropped, ROI);
			cv::cvtColor(croppedRef, imagecropped, CV_BGR2GRAY);
			cv::threshold(imagecropped, imagecropped, 180, 255, CV_THRESH_BINARY);

			pointQR.push_back(mc[A]);
			cv::circle(imCalibColor, cv::Point(pointQR[0].x, pointQR[0].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[B]);
			cv::circle(imCalibColor, cv::Point(pointQR[1].x, pointQR[1].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[C]);
			cv::circle(imCalibColor, cv::Point(pointQR[2].x, pointQR[2].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			cv::Point2f D(0.0f,0.0f);
			cv::Point2f E(0.0f,0.0f);
			cv::Point2f F(0.0f,0.0f);

			D.x = (mc[A].x + mc[B].x)/2;
			E.x = (mc[B].x + mc[C].x)/2;
			F.x = (mc[C].x + mc[A].x)/2;

			D.y = (mc[A].y + mc[B].y)/2;
			E.y = (mc[B].y + mc[C].y)/2;
			F.y = (mc[C].y + mc[A].y)/2;

			pointQR.push_back(D);
			cv::circle(imCalibColor, cv::Point(pointQR[3].x, pointQR[3].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(E);
			cv::circle(imCalibColor, cv::Point(pointQR[4].x, pointQR[4].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(F);
			cv::circle(imCalibColor, cv::Point(pointQR[5].x, pointQR[5].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			patternFound = true;
			std::cout << "patternfound" << std::endl;
			
			cv::SiftFeatureDetector detector;
			cv::vector<cv::KeyPoint> keypoints1, keypoints2;
			detector.detect(tabQR[3], keypoints1);
			detector.detect(imagecropped, keypoints2);

			cv::Ptr<cv::DescriptorExtractor> descriptor = cv::DescriptorExtractor::create("SIFT");
			cv::Mat descriptors1, descriptors2;
			descriptor->compute(tabQR[3], keypoints1, descriptors1 );
			descriptor->compute(imagecropped, keypoints2, descriptors2 );

			cv::FlannBasedMatcher matcher; 
			std::vector< cv::DMatch > matches; 
			matcher.match( descriptors1, descriptors2, matches ); 
			double max_dist = 0; double min_dist = 100;

			for( int i = 0; i < descriptors1.rows; i++ ) 
			{ 
				double dist = matches[i].distance; 
				if( dist < min_dist ) min_dist = dist; 
				if( dist > max_dist ) max_dist = dist; 
			}

			std::vector< cv::DMatch > good_matches;
			for( int i = 0; i < descriptors1.rows; i++ ) 
				if( matches[i].distance <= 2*min_dist ) 
					good_matches.push_back( matches[i]); 
			cv::Mat imgout; 
			drawMatches(tabQR[3], keypoints1, imagecropped, keypoints2, good_matches, imgout); 

			std::vector<cv::Point2f> pt_img1; 
			std::vector<cv::Point2f> pt_img2; 
			for( int i = 0; i < (int)good_matches.size(); i++ ) 
			{ 
				pt_img1.push_back(keypoints1[ good_matches[i].queryIdx ].pt ); 
				pt_img2.push_back(keypoints2[ good_matches[i].trainIdx ].pt ); 
			}
			cv::Mat H = findHomography( pt_img1, pt_img2, CV_RANSAC );

			cv::Mat result; 
			warpPerspective(tabQR[3],result,H,cv::Size(tabQR[3].cols+imagecropped.cols,tabQR[3].rows)); 
			cv::Mat half(result,cv::Rect(0,0,imagecropped.cols,imagecropped.rows)); 
			imagecropped.copyTo(half); 
			imshow( "Result", result );

			break;
		}

		key = (char)cv::waitKey(67);
	}while(patternFound != true && key != 27);

	if(patternFound)
		imCalibNext = imCalibColor;
	
	return patternFound;

}
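
The good-match filter above keeps every match within 2*min_dist of the best distance. A common alternative under the same OpenCV 2.x matcher API is a Lowe-style ratio test with knnMatch; the 0.75 threshold below is a conventional choice, not taken from the code above:

#include <opencv2/opencv.hpp>

// Sketch: Lowe-style ratio test as an alternative to the 2*min_dist filter above.
std::vector<cv::DMatch> ratioTestMatch(const cv::Mat &descriptors1, const cv::Mat &descriptors2)
{
    cv::FlannBasedMatcher matcher;
    std::vector<std::vector<cv::DMatch> > knn_matches;
    matcher.knnMatch(descriptors1, descriptors2, knn_matches, 2); // two nearest neighbours

    std::vector<cv::DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++) {
        // keep a match only if it is clearly better than the runner-up
        if (knn_matches[i].size() == 2 &&
            knn_matches[i][0].distance < 0.75f * knn_matches[i][1].distance)
            good_matches.push_back(knn_matches[i][0]);
    }
    return good_matches;
}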
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::MomentArrayStamped moments_msg;
      moments_msg.header = msg->header;

      // Do the work
      cv::Mat src_gray;
      /// Convert image to gray and blur it
      cv::cvtColor( frame, src_gray, cv::COLOR_BGR2GRAY );
      cv::blur( src_gray, src_gray, cv::Size(3,3) );

      /// Create window
      if( debug_view_) {
        cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
      }

      cv::Mat canny_output;
      std::vector<std::vector<cv::Point> > contours;
      std::vector<cv::Vec4i> hierarchy;
      cv::RNG rng(12345);

      /// Detect edges using canny
      cv::Canny( src_gray, canny_output, low_threshold_ , low_threshold_ *2, 3 );
      /// Find contours
      cv::findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );

      /// Get the moments
      std::vector<cv::Moments> mu(contours.size() );
      for( size_t i = 0; i < contours.size(); i++ )
      { mu[i] = moments( contours[i], false ); }

      ///  Get the mass centers:
      std::vector<cv::Point2f> mc( contours.size() );
      for( size_t i = 0; i < contours.size(); i++ )
      { mc[i] = cv::Point2f( static_cast<float>(mu[i].m10/mu[i].m00) , static_cast<float>(mu[i].m01/mu[i].m00) ); }

      /// Draw contours
      cv::Mat drawing = cv::Mat::zeros( canny_output.size(), CV_8UC3 );
      for( size_t i = 0; i< contours.size(); i++ )
      {
        cv::Scalar color = cv::Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
        cv::drawContours( drawing, contours, (int)i, color, 2, 8, hierarchy, 0, cv::Point() );
        cv::circle( drawing, mc[i], 4, color, -1, 8, 0 );
      }

      /// Calculate the area with the moments 00 and compare with the result of the OpenCV function
      printf("\t Info: Area and Contour Length \n");
      for( size_t i = 0; i< contours.size(); i++ )
      {
        printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", (int)i, mu[i].m00, cv::contourArea(contours[i]), cv::arcLength( contours[i], true ) );
        cv::Scalar color = cv::Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
        cv::drawContours( drawing, contours, (int)i, color, 2, 8, hierarchy, 0, cv::Point() );
        cv::circle( drawing, mc[i], 4, color, -1, 8, 0 );
        
        opencv_apps::Moment moment_msg;
        moment_msg.m00 = mu[i].m00;
        moment_msg.m10 = mu[i].m10;
        moment_msg.m01 = mu[i].m01;
        moment_msg.m20 = mu[i].m20;
        moment_msg.m11 = mu[i].m11;
        moment_msg.m02 = mu[i].m02;
        moment_msg.m30 = mu[i].m30;
        moment_msg.m21 = mu[i].m21;
        moment_msg.m12 = mu[i].m12;
        moment_msg.m03 = mu[i].m03;
        moment_msg.mu20 = mu[i].mu20;
        moment_msg.mu11 = mu[i].mu11;
        moment_msg.mu02 = mu[i].mu02;
        moment_msg.mu30 = mu[i].mu30;
        moment_msg.mu21 = mu[i].mu21;
        moment_msg.mu12 = mu[i].mu12;
        moment_msg.mu03 = mu[i].mu03;
        moment_msg.nu20 = mu[i].nu20;
        moment_msg.nu11 = mu[i].nu11;
        moment_msg.nu02 = mu[i].nu02;
        moment_msg.nu30 = mu[i].nu30;
        moment_msg.nu21 = mu[i].nu21;
        moment_msg.nu12 = mu[i].nu12;
        moment_msg.nu03 = mu[i].nu03;
        opencv_apps::Point2D center_msg;
        center_msg.x = mc[i].x;
        center_msg.y = mc[i].y;
        moment_msg.center = center_msg;
        moment_msg.area = cv::contourArea(contours[i]);
        moment_msg.length = cv::arcLength(contours[i], true);
        moments_msg.moments.push_back(moment_msg);
      }

      if( debug_view_) {
        cv::imshow( window_name_, drawing );
        int c = cv::waitKey(1);
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding, drawing).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(moments_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
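
The message above serializes the raw (m), central (mu), and normalized central (nu) moments of each contour. If rotation- and scale-invariant descriptors are wanted downstream, the same cv::Moments struct can be reduced to the seven Hu invariants; a short sketch:

#include <opencv2/opencv.hpp>
#include <cstdio>

// Sketch: rotation/scale/translation-invariant Hu moments from a contour,
// derived from the same cv::Moments struct the message above serializes.
void printHuMoments(const std::vector<cv::Point> &contour)
{
    cv::Moments m = cv::moments(contour, false);
    double hu[7];
    cv::HuMoments(m, hu);   // seven invariants built from the nu central moments
    for (int k = 0; k < 7; k++)
        printf("hu[%d] = %g\n", k, hu[k]);
}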
Example no. 26
cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window,
                              TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    const int TOLERANCE = 10;
    Size size;
    Mat mat;
    UMat umat;
    bool isUMat = _probImage.isUMat();

    if (isUMat)
        umat = _probImage.getUMat(), size = umat.size();
    else
        mat = _probImage.getMat(), size = mat.size();

    meanShift( _probImage, window, criteria );

    window.x -= TOLERANCE;
    if( window.x < 0 )
        window.x = 0;

    window.y -= TOLERANCE;
    if( window.y < 0 )
        window.y = 0;

    window.width += 2 * TOLERANCE;
    if( window.x + window.width > size.width )
        window.width = size.width - window.x;

    window.height += 2 * TOLERANCE;
    if( window.y + window.height > size.height )
        window.height = size.height - window.y;

    // Calculate moments at the new center of mass
    Moments m = isUMat ? moments(umat(window)) : moments(mat(window));

    double m00 = m.m00, m10 = m.m10, m01 = m.m01;
    double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;

    if( fabs(m00) < DBL_EPSILON )
        return RotatedRect();

    double inv_m00 = 1. / m00;
    int xc = cvRound( m10 * inv_m00 + window.x );
    int yc = cvRound( m01 * inv_m00 + window.y );
    double a = mu20 * inv_m00, b = mu11 * inv_m00, c = mu02 * inv_m00;

    // Calculating width & height
    double square = std::sqrt( 4 * b * b + (a - c) * (a - c) );

    // Calculating orientation
    double theta = atan2( 2 * b, a - c + square );

    // Calculating width & length of figure
    double cs = cos( theta );
    double sn = sin( theta );

    double rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
    double rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
    double length = std::sqrt( rotate_a * inv_m00 ) * 4;
    double width = std::sqrt( rotate_c * inv_m00 ) * 4;

    // When theta is near 0 or pi/2, length and width may be swapped
    if( length < width )
    {
        std::swap( length, width );
        std::swap( cs, sn );
        theta = CV_PI*0.5 - theta;
    }

    // Saving results
    int _xc = cvRound( xc );
    int _yc = cvRound( yc );

    int t0 = cvRound( fabs( length * cs ));
    int t1 = cvRound( fabs( width * sn ));

    t0 = MAX( t0, t1 ) + 2;
    window.width = MIN( t0, (size.width - _xc) * 2 );

    t0 = cvRound( fabs( length * sn ));
    t1 = cvRound( fabs( width * cs ));

    t0 = MAX( t0, t1 ) + 2;
    window.height = MIN( t0, (size.height - _yc) * 2 );

    window.x = MAX( 0, _xc - window.width / 2 );
    window.y = MAX( 0, _yc - window.height / 2 );

    window.width = MIN( size.width - window.x, window.width );
    window.height = MIN( size.height - window.y, window.height );

    RotatedRect box;
    box.size.height = (float)length;
    box.size.width = (float)width;
    box.angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
    while(box.angle < 0)
        box.angle += 360;
    while(box.angle >= 360)
        box.angle -= 360;
    if(box.angle >= 180)
        box.angle -= 180;
    box.center = Point2f( window.x + window.width*0.5f, window.y + window.height*0.5f);

    return box;
}
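
cv::CamShift expects a back-projection probability image, not a raw frame. A hedged sketch of one tracking step driven by a hue back-projection; the target histogram is assumed to have been built beforehand from the object's initial region:

#include <opencv2/opencv.hpp>

// Sketch: driving cv::CamShift from a hue back-projection.
// 'hist' is a 1-D hue histogram of the target, built beforehand (assumed);
// 'window' is the current track window, updated in place.
cv::RotatedRect trackOnce(const cv::Mat &frame, const cv::Mat &hist, cv::Rect &window)
{
    cv::Mat hsv, hue, backproj;
    cv::cvtColor(frame, hsv, CV_BGR2HSV);

    int fromTo[] = {0, 0};                     // copy the hue channel only
    hue.create(hsv.size(), hsv.depth());
    cv::mixChannels(&hsv, 1, &hue, 1, fromTo, 1);

    float hranges[] = {0, 180};
    const float *ranges = hranges;
    cv::calcBackProject(&hue, 1, 0, hist, backproj, &ranges);

    return cv::CamShift(backproj, window,
            cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
}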
Example no. 27
int ProcessFrame(cv::Mat *frame, cv::Mat *fg_mask, double tick) {

    double dT = ((tick - prev_tick ) / cv::getTickFrequency()); //seconds
    prev_tick = tick;
    if(with_fps) {
        printf("FPS ticks : %f\n", (float) 1 / dT);
    }
    cv::Mat hsv;
    cvtColor(*frame, hsv, CV_BGR2HSV);

    cv::erode(*fg_mask, *fg_mask, cv::Mat(), cv::Point(-1, -1), 5);
    cv::dilate(*fg_mask, *fg_mask, cv::Mat(), cv::Point(-1, -1), 8);

    if(with_gui) {
        cv::imshow("Threshold", *fg_mask);
    }
    vector<vector<cv::Point>> contours;
    cv::findContours(*fg_mask, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    vector<contour_t> found_contures;
    contour_t new_contour;
    int counter = 0;

    for (size_t i = 0; i < contours.size(); i++) {
        cv::Rect bBox;
        cv::Moments mu;
        bBox = cv::boundingRect(contours[i]);
        mu = moments( contours[i], false);

        if (bBox.area() >= min_area) {
            new_contour.id = counter;
            new_contour.contours = contours[i];
            new_contour.mu = mu;
            new_contour.contour_use = false;
            counter++;
            found_contures.push_back(new_contour);
        }
    }

    loadValidCounureToObject(found_contures, tracked_objects);                             // load all distances from the contours and order them

    std::sort(tracked_objects.begin(),tracked_objects.end(),comp);
    for (size_t i = 0; i < tracked_objects.size(); i++) {
        tracked_objects[i].set_index_object((int) i);
        if (tracked_objects[i].selected_counture.size() >= 1){                           // if the object has some contours in its neighborhood
            found_contures[tracked_objects[i].selected_counture[0].ID].candidate_object.push_back(tracked_objects[i]);    // push its ID into the contour's candidate list
        }
        }
    }
    for (size_t i = 0; i < tracked_objects.size(); i++) {


        int contourID = parsingContours(found_contures, tracked_objects[i]);

        if (contourID == -1){
            if (tracked_objects[i].counter() < 2) {
                tracked_objects.erase(tracked_objects.begin() + i);
                i--;
                continue;
            }
            else {
                if (!(tracked_objects[i].last_y_pos() > frame_height - frame_height / 6 ||
                        tracked_objects[i].last_y_pos() < frame_height / 6)) {
                    tracked_objects[i].kalmanMakeCalculate(*frame, dT);
                }
                else {
                    if (((tracked_objects[i].starting_y_pos() < frame_height / 2 &&
                            tracked_objects[i].last_y_pos() <  frame_height / 6 ) ||
                            (tracked_objects[i].starting_y_pos() > frame_height / 2 &&
                                    tracked_objects[i].last_y_pos() > frame_height - frame_height / 6)) &&
                            !(tracked_objects[i].change_startin_pos())) {

                        tracked_objects[i].kalmanMakeCalculate(*frame, dT);
                    }
                    else {
                        counterAbsPersonFlow((int) i);
                        tracked_objects.erase(tracked_objects.begin() + i);
                        i--;
                        continue;
                    }

                }
            }
            if (tracked_objects[i].get_usingRate() > 30) {
                counterAbsPersonFlow((int) i);
                tracked_objects.erase(tracked_objects.begin() + i);
                i--;
                continue;
            }
        }
        else{
            found_contures[contourID].contour_use = true;
            cv::MatND hist;// = CalcHistogramBase(hsv, found_contures[contourID].contours, contourID, tracked_objects[i].hist());
            tracked_objects[i].kalmanMakeCalculate(*frame, found_contures[contourID].mu, dT, hist);
            if (tracked_objects[i].starting_y_pos() < frame_height / 2 && tracked_objects[i].last_y_pos() > frame_height - frame_height / 4 ){
                if (counterAbsPersonFlow((int) i) == 0) {
                    tracked_objects[i].set_startingYpos(frame_height);
                    tracked_objects[i].set_change_startin_pos(true);
                }
            }
            if (tracked_objects[i].starting_y_pos() > frame_height / 2 && tracked_objects[i].last_y_pos() < frame_height / 4 ){
                if(counterAbsPersonFlow((int) i) == 0) {
                    tracked_objects[i].set_startingYpos(0);
                    tracked_objects[i].set_change_startin_pos(true);
                }
            }
        }
    }

    for (size_t i = 0; i < found_contures.size(); i++) {
        if (!found_contures[i].contour_use) {

            bool create = true;
            double x = found_contures[i].mu.m10 / found_contures[i].mu.m00;
            double y = found_contures[i].mu.m01 / found_contures[i].mu.m00;


            for (size_t k = 0; k < tracked_objects.size(); k++) {
                double distance = CalcDistance(x, tracked_objects[k].last_x_pos(), y, tracked_objects[k].last_y_pos());
                if (min_dist_to_create > distance) {
                    create = false;
                }
            }
            if (create) {
                kalmanCont newObject;
                newObject.set_id(id);
                newObject.set_startingYpos(y);
                newObject.set_startingXpos(x);

                cv::MatND hist;// = CalcHistogramContour(hsv, found_contures[i].contours, (int) i);
                newObject.kalmanMakeCalculate(*frame, found_contures[i].mu, dT, hist);

                tracked_objects.push_back(newObject);
                id++;
                id = (id > 10) ? 0 : id;
            }
        }
        found_contures[i].contour_use = false;
    }
    for (size_t i = 0; i < tracked_objects.size(); i++) {

        tracked_objects[i].add_usingRate();
        tracked_objects[i].set_counter();
        tracked_objects[i].clear_history_frams();
        if (with_gui) {
            cv::Point center;
            center.x = (int) tracked_objects[i].last_x_pos();
            center.y = (int) tracked_objects[i].last_y_pos();
            cv::circle(*frame, center, 2, CV_RGB(tracked_objects[i].R, tracked_objects[i].G, tracked_objects[i].B), -1);
            stringstream sstr;
            sstr << "Objekt" << tracked_objects[i].id();
            cv::putText(*frame, sstr.str(), cv::Point(center.x + 3, center.y - 3), cv::FONT_HERSHEY_SIMPLEX, 0.5,
                            CV_RGB(tracked_objects[i].R, tracked_objects[i].G, tracked_objects[i].B), 2);
        }
    }
    if (with_gui) {
        stringstream ss;
        ss << out;
        string counter1 = ss.str();
        putText(*frame, counter1.c_str(), cv::Point(5, 30), FONT_HERSHEY_SCRIPT_SIMPLEX, 1, cv::Scalar(0, 255, 0),1);

        stringstream ss2;
        ss2 << in;
        string counter2 = ss2.str();
        putText(*frame, counter2.c_str(), cv::Point(5, frame_height - 30), FONT_HERSHEY_SCRIPT_SIMPLEX, 1, cv::Scalar(0, 0, 255),1);
        cv::imshow("Tracking", *frame);
    }
    if (!with_gui){
       // printf("in: %d, out: %d\n",in,out);
    }
    return 0;

}
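
ProcessFrame receives its foreground mask from outside. One plausible producer, sketched here with the MOG2 background model; the parameters, camera index, and shadow-removal threshold are assumptions:

#include <opencv2/opencv.hpp>

// Sketch: one way to produce the fg_mask that ProcessFrame consumes,
// using the MOG2 background model (parameters and camera index are assumptions).
void runCounter()
{
    cv::Ptr<cv::BackgroundSubtractorMOG2> bg =
            cv::createBackgroundSubtractorMOG2(500, 16.0, true);

    cv::VideoCapture cap(0);
    cv::Mat frame, fg_mask;
    while (cap.read(frame)) {
        bg->apply(frame, fg_mask);                                    // 0 bg, 127 shadow, 255 fg
        cv::threshold(fg_mask, fg_mask, 200, 255, cv::THRESH_BINARY); // drop shadow labels
        if (ProcessFrame(&frame, &fg_mask, (double)cv::getTickCount()) != 0)
            break;
    }
}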
Example no. 28
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    Size size;
    int cn;
    Mat mat;
    UMat umat;
    bool isUMat = _probImage.isUMat();

    if (isUMat)
        umat = _probImage.getUMat(), cn = umat.channels(), size = umat.size();
    else
        mat = _probImage.getMat(), cn = mat.channels(), size = mat.size();

    Rect cur_rect = window;

    CV_Assert( cn == 1 );

    if( window.height <= 0 || window.width <= 0 )
        CV_Error( Error::StsBadArg, "Input window has non-positive sizes" );

    window = window & Rect(0, 0, size.width, size.height);

    double eps = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.;
    eps = cvRound(eps*eps);
    int i, niters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100;

    for( i = 0; i < niters; i++ )
    {
        cur_rect = cur_rect & Rect(0, 0, size.width, size.height);
        if( cur_rect == Rect() )
        {
            cur_rect.x = size.width/2;
            cur_rect.y = size.height/2;
        }
        cur_rect.width = std::max(cur_rect.width, 1);
        cur_rect.height = std::max(cur_rect.height, 1);

        Moments m = isUMat ? moments(umat(cur_rect)) : moments(mat(cur_rect));

        // Calculating center of mass
        if( fabs(m.m00) < DBL_EPSILON )
            break;

        int dx = cvRound( m.m10/m.m00 - window.width*0.5 );
        int dy = cvRound( m.m01/m.m00 - window.height*0.5 );

        int nx = std::min(std::max(cur_rect.x + dx, 0), size.width - cur_rect.width);
        int ny = std::min(std::max(cur_rect.y + dy, 0), size.height - cur_rect.height);

        dx = nx - cur_rect.x;
        dy = ny - cur_rect.y;
        cur_rect.x = nx;
        cur_rect.y = ny;

        // Stop once the center-of-mass shift is within the epsilon tolerance
        if( dx*dx + dy*dy < eps )
            break;
    }

    window = cur_rect;
    return i;
}
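
Unlike CamShift above, meanShift leaves the window size untouched and only translates it until the center-of-mass shift falls within the termination criteria. A minimal call sketch, with the back-projection assumed built as in the CamShift note:

#include <opencv2/opencv.hpp>

// Sketch: fixed-size window tracking with meanShift (backproj assumed).
int shiftWindow(const cv::Mat &backproj, cv::Rect &track_window)
{
    // Returns the number of shift iterations performed;
    // track_window is moved in place.
    return cv::meanShift(backproj, track_window,
            cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
}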
Example no. 29
bool ShapeFilter::findCirc(cv::Mat img){
    //getting the contours
    cv::Mat canny;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;

    float radius;
    cv::Point2f center;
    this->radius.clear();
    this->center.clear();

    int thresh = 100;
    // Detect edges using canny
    Canny(img, canny, thresh, thresh*2, 3 );
    // Find contours
    findContours( canny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );

    int circlesFound = 0;
    //constants
    unsigned int minPoints = 6;
    double minArea = 10;
    float maxRad = 100;

    for (std::vector<cv::Point> co: contours){
        if (co.size() < minPoints){
            println("Circle Not enough Points");
            continue;
        }

        double area = cv::contourArea(co);
        if (area < minArea) {
            println ("Circle not enough area " + std::to_string(area));
            continue;
        }

        /*
        /// Get the moments
        std::vector<cv::Moments> mu(co.size() );
        for( int i = 0; i < co.size(); i++ ){
            mu[i] = moments( co[i], false );
        }

        ///  Get the mass centers:
        std::vector<cv::Point2f> mc( contours.size() );
        for( int i = 0; i < contours.size(); i++ )
        { mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
*/

        cv::Moments cvmoments = moments(co, false);
        double nu11 = cvmoments.nu11;
        double nu20 = cvmoments.nu20;
        double nu02 = cvmoments.nu02;
        double nu21 = cvmoments.nu21;
        double nu12 = cvmoments.nu12;
        double nu03 = cvmoments.nu03;
        double nu30 = cvmoments.nu30;

        double r03 = fabs(nu30 / nu03);
        r03 = (r03 > 1) ? r03 : 1.0/r03;
        double r12 = fabs(nu12 / nu21);
        r12 = (r12 > 1) ? r12 : 1.0/r12;
        double r02 = fabs(nu02 / nu20);
        r02 = (r02 > 1) ? r02 : 1.0/r02;

        double r11 = fabs( MEAN2(nu02,nu20) / nu11);
        double R = MEAN2(nu20,nu02) / std::max((MEAN2(nu21,nu12)), (MEAN2(nu30,nu03)));
        bool pass = true;
        pass = (r03 <= 25.0) && (r12 <= 12.0) && (r02 <= 12.0) && (r11 > 2.5) && (R > 25);

        if (!pass){
            println("Circle failed math test");
            continue;
        }

        // get min enclosing circle and radius
        //CvPoint2D32f centroid32f;

        //cv::minEnclosingCircle(co, &centroid32f, &radius);
        cv::minEnclosingCircle(co, center, radius);

        if (radius > maxRad || radius < 0) {
            println("Circle radius out of range");
            continue;
        }

        // do checks on area and perimeter
        double area_ratio = area / (CV_PI*radius*radius);
        //double perimeter_ratio = perimeter / (2*CV_PI*radius);
        if (area_ratio < 0.7) {
            println("Circle fail Area");
            continue;
        }

        bool repeat = false;
        //check if circle is found already
        for (unsigned int i = 0; i < this->center.size(); i++){
            cv::Point2f c = this->center[i];
            if (std::abs(c.x-center.x) < 20 && std::abs(c.y - center.y) < 20){
                repeat = true;
                break;
            }
        }

        if (!repeat){
            // check whether we have already found the requested number of circles
            if (this->center.size() < max){
                println("Found circle");
                this->radius.push_back(radius);
                this->center.push_back(center);
                circlesFound++;
            }else{
                println("Already found enough circles");
            }
        }else{
            println("Already found this circle");
        }
    }
    return circlesFound != 0;
}
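
The decisive geometric check in findCirc compares the contour area against the area of its minimum enclosing circle. That test condensed into a stand-alone predicate, reusing the 0.7 cutoff from above:

#include <opencv2/opencv.hpp>

// Sketch: the enclosing-circle circularity test used by findCirc.
bool looksCircular(const std::vector<cv::Point> &contour)
{
    double area = cv::contourArea(contour);
    cv::Point2f center;
    float radius = 0.f;
    cv::minEnclosingCircle(contour, center, radius);
    if (radius <= 0.f)
        return false;
    // A perfect disc fills its enclosing circle; 0.7 is the cutoff used above.
    return area / (CV_PI * radius * radius) >= 0.7;
}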
Example no. 30
bool detecterQR(cv::VideoCapture vcap, std::vector<cv::Point2f> *pointsQR, std::vector<cv::Point3f> *QRpoint3D, std::vector<cv::Point3f> *tabuseless, std::vector<cv::Point3f> *QRpointObject3D, cv::Mat *imCalibNext)
{
	cv::Mat imCalibColor;	
	cv::vector<cv::vector<cv::Point> > contours;
	cv::vector<cv::Vec4i> hierarchy;
	
	int A = 0, B= 0, C= 0;
	char key;
	int mark;
	bool patternFound = false;

	do
	{
		while(imCalibColor.empty())
		{
			vcap >> imCalibColor;
		}
		vcap >> imCalibColor;

		cv::Mat edges(imCalibColor.size(),CV_MAKETYPE(imCalibColor.depth(), 1));
		cv::imshow("Projection", imCalibColor);
		cv::cvtColor(imCalibColor, imCalibColor, CV_BGR2GRAY);
		Canny(imCalibColor, edges, 100 , 200, 3);

		cv::findContours( edges, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);

		mark = 0;

		cv::vector<cv::Moments> mu(contours.size());
  		cv::vector<cv::Point2f> mc(contours.size());

		for( int i = 0; i < contours.size(); i++ )
		{	
			mu[i] = moments( contours[i], false ); 
			mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
		}

		// A QR finder pattern shows up as a deep stack of nested contours;
		// count each contour's nesting depth via the hierarchy's child links.
		for( int i = 0; i < contours.size(); i++ )
		{
			int k=i;
			int c=0;

			while(hierarchy[k][2] != -1)
			{
				k = hierarchy[k][2] ;
				c = c+1;
			}

			if (c >= 5)
			{	
				if (mark == 0)		A = i;
				else if  (mark == 1)	B = i;		// i.e., A is already found, assign current contour to B
				else if  (mark == 2)	C = i;		// i.e., A and B are already found, assign current contour to C
				mark = mark + 1 ;
			}
		} 

		if (A !=0 && B !=0 && C!=0)
		{
			(*pointsQR).push_back(mc[A]);
			(*QRpoint3D).push_back(cv::Point3f(mc[A].x,mc[A].y,0.f));
			(*tabuseless).push_back(cv::Point3f(mc[A].x,mc[A].y,0.f));
			
			(*pointsQR).push_back(mc[B]);
			(*QRpoint3D).push_back(cv::Point3f(mc[B].x,mc[B].y,0.f));
			(*tabuseless).push_back(cv::Point3f(mc[B].x,mc[B].y,0.f));
			
			(*pointsQR).push_back(mc[C]);
			(*QRpoint3D).push_back(cv::Point3f(mc[C].x,mc[C].y,0.f));
			(*tabuseless).push_back(cv::Point3f(mc[C].x,mc[C].y,0.f));

			(*QRpointObject3D).push_back(cv::Point3f(mc[A].x,mc[A].y,0.f));
			(*QRpointObject3D).push_back(cv::Point3f(mc[B].x,mc[B].y,0.f));
			(*QRpointObject3D).push_back(cv::Point3f(mc[C].x,mc[C].y,0.f));
			(*QRpointObject3D).push_back(cv::Point3f(mc[A].x,mc[A].y,100));
			(*QRpointObject3D).push_back(cv::Point3f(mc[B].x,mc[B].y,100));
			(*QRpointObject3D).push_back(cv::Point3f(mc[C].x,mc[C].y,100));

			cv::Point2f D(0.0f,0.0f);
			cv::Point2f E(0.0f,0.0f);
			cv::Point2f F(0.0f,0.0f);

			D.x = (mc[A].x + mc[B].x)/2;
			E.x = (mc[B].x + mc[C].x)/2;
			F.x = (mc[C].x + mc[A].x)/2;

			D.y = (mc[A].y + mc[B].y)/2;
			E.y = (mc[B].y + mc[C].y)/2;
			F.y = (mc[C].y + mc[A].y)/2;

			(*pointsQR).push_back(D);
			(*QRpoint3D).push_back(cv::Point3f(D.x,D.y,0.f));
			(*tabuseless).push_back(cv::Point3f(D.x,D.y,0.f));
	
			(*pointsQR).push_back(E);
			(*QRpoint3D).push_back(cv::Point3f(E.x,E.y,0.f));
			(*tabuseless).push_back(cv::Point3f(E.x,E.y,0.f));
			
			(*pointsQR).push_back(F);
			(*QRpoint3D).push_back(cv::Point3f(F.x,F.y,0.f));
			(*tabuseless).push_back(cv::Point3f(F.x,F.y,0.f));
			

			patternFound = true;
			std::cout << "patternfound" << std::endl;

			break;
		}

		key = (char)cv::waitKey(67);
	}while(patternFound != true && key != 27);

	if(patternFound)
		*imCalibNext = imCalibColor;
	
	return patternFound;
}
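
detecterQR hands back matched 2-D image points and planar 3-D model points, which is exactly the input cv::solvePnP needs for pose estimation. A hedged sketch of that consumer; the camera matrix and distortion coefficients below are placeholders standing in for a real calibration:

#include <opencv2/opencv.hpp>
#include <iostream>

// Sketch: feeding the correspondences gathered by detecterQR into solvePnP.
// cameraMatrix and distCoeffs are placeholder values, not a real calibration.
void estimatePose(cv::VideoCapture &vcap)
{
    std::vector<cv::Point2f> pointsQR;
    std::vector<cv::Point3f> QRpoint3D, tabuseless, QRpointObject3D;
    cv::Mat imCalibNext;

    if (!detecterQR(vcap, &pointsQR, &QRpoint3D, &tabuseless, &QRpointObject3D, &imCalibNext))
        return;

    cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                                      0, 800, 240,
                                                      0,   0,   1);
    cv::Mat distCoeffs = cv::Mat::zeros(5, 1, CV_64F);
    cv::Mat rvec, tvec;
    cv::solvePnP(QRpoint3D, pointsQR, cameraMatrix, distCoeffs, rvec, tvec);
    std::cout << "t = " << tvec.t() << std::endl;
}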