コード例 #1
0
ファイル: gui.cpp プロジェクト: feiting/jderobot
    void Gui::get3DPositionZ(TPinHoleCamera * camera, HPoint3D &res, HPoint2D in, float Z = 0.0) {
        /* Intersect the ray back-projected through pixel `in` of `camera`
           with the horizontal plane at height Z (defaults to the floor,
           Z = 0). On success the intersection is written to `res` with
           res.H == 1.0; if the ray never crosses the plane, res.H is set
           to 0.0 and `res` is otherwise left untouched. */
        HPoint2D pix;
        HPoint3D ray;

        const float camX = camera->position.X;
        const float camY = camera->position.Y;
        const float camZ = camera->position.Z;

        pix.x = in.x;
        pix.y = in.y;
        pix.h = in.h;

        /* Pixel -> optical coordinates, then cast the 3D viewing ray. */
        this->pixel2optical(camera, &pix);
        backproject(&ray, pix, *camera);

        /* Ray parallel to the Z plane: no intersection, flag failure via
           the homogeneous coordinate. */
        if ((ray.Z - camZ) == 0.0) {
            res.H = 0.0;
            return;
        }

        /* Parametric line through the camera and the back-projected point:
           (X-x)/(ray.X-x) = (Y-y)/(ray.Y-y) = (Z-z)/(ray.Z-z). */
        const float lambda = (Z - camZ) / (ray.Z - camZ);

        res.X = camX + (ray.X - camX) * lambda;
        res.Y = camY + (ray.Y - camY) * lambda;
        res.Z = Z;
        res.H = 1.0;
    }
コード例 #2
0
    float CurvatureFeatureExtractor::computeConvexity(core::DataSegment<float, 4> &normals, SurfaceFeatureExtractor::SurfaceFeature feature, std::vector<int> &surface, int w){
      // Decide whether the surface bends convexly or concavely:
      //  1. pick the two extremal bins of the feature's normal histogram,
      //  2. back-project both bins into image space,
      //  3. compare the histogram and image directions via a scalar product.
      const std::pair<utils::Point,utils::Point> extremalBins = computeExtremalBins(feature);
      const std::pair<utils::Point,utils::Point> backprojected = backproject(normals, extremalBins, surface, w);
      return computeConvexity(extremalBins, backprojected);
    }
コード例 #3
0
void
myprogeo::mybackproject(float x, float y, float* xp, float* yp, float* zp, float* camx, float* camy, float* camz, int cam){
	HPoint2D p;
	HPoint3D pro;

	p.x=GRAPHIC_TO_OPTICAL_X(x,y);
	p.y=GRAPHIC_TO_OPTICAL_Y(x,y);

	p.h=1;

	backproject(&pro,p,cameras[cam]);

	*xp=pro.X;
	*yp=pro.Y;
	*zp=pro.Z;

	*camx=cameras[cam].position.X;
	*camy=cameras[cam].position.Y;
	*camz=cameras[cam].position.Z;
}
コード例 #4
0
ファイル: controller.cpp プロジェクト: AeroCano/JdeRobot
	void Controller::add_depth_pointsImage(cv::Mat distances, cv::Mat imageRGB, rgbdManualCalibrator::DrawArea* world, int cam, int scale, int colour){
		// For every pixel of `distances` with a valid depth reading,
		// back-project it through camera `cam` into a 3D point and add it
		// to the world view. `colour` selects the point colour:
		// 0 = RGB taken from imageRGB, 1 = red, 2 = blue, else dark green.
		// NOTE(review): `scale` is unused; kept for interface compatibility.

		// The camera pose and its viewing axis are constant over the whole
		// image: compute them once instead of once per pixel (the original
		// redid this work — including a sqrt — for every valid pixel).
		float camx = this->cameras[cam].position.X;
		float camy = this->cameras[cam].position.Y;
		float camz = this->cameras[cam].position.Z;

		float c1x = this->cameras[cam].foa.X;
		float c1y = this->cameras[cam].foa.Y;
		float c1z = this->cameras[cam].foa.Z;

		// Unit vector from the camera towards its focus of attention
		// (the optical axis).
		float fmod = sqrt(1/(((camx-c1x)*(camx-c1x))+((camy-c1y)*(camy-c1y))+((camz-c1z)*(camz-c1z))));
		float fx = (c1x - camx)*fmod;
		float fy = (c1y - camy)*fmod;
		float fz = (c1z - camz)*fmod;

		for (int xIm=0; xIm< cWidth; xIm++){
			for (int yIm=0; yIm<cHeight ; yIm++){
				float d = distances.at<float>(yIm,xIm);

				if (d != 0 ){   // 0 marks "no depth reading" for this pixel
					// Back-project the pixel onto a point of its viewing ray.
					HPoint2D p;
					HPoint3D pro;
					p.x=GRAPHIC_TO_OPTICAL_X(xIm,yIm,cHeight);
					p.y=GRAPHIC_TO_OPTICAL_Y(xIm,yIm,cHeight);
					p.h=1;
					backproject(&pro,p,this->cameras[cam]);
					float xp=pro.X;
					float yp=pro.Y;
					float zp=pro.Z;

					// Unit vector from the camera through that ray point.
					float modulo = sqrt(1/(((camx-xp)*(camx-xp))+((camy-yp)*(camy-yp))+((camz-zp)*(camz-zp))));
					float ux = (xp-camx)*modulo;
					float uy = (yp-camy)*modulo;
					float uz = (zp-camz)*modulo;

					// Point at distance d along the optical axis: the sensor
					// reports depth along this axis, not along the pixel ray.
					float Fx= d*fx + camx;
					float Fy= d*fy + camy;
					float Fz= d*fz + camz;

					// Intersect the pixel ray with the plane through F that
					// is orthogonal to the optical axis; t parameterises the
					// ray at the true 3D point.
					float t = (-(fx*camx) + (fx*Fx) - (fy*camy) + (fy*Fy) - (fz*camz) + (fz*Fz))/((fx*ux) + (fy*uy) + (fz*uz));

					if (colour==0)
						world->add_kinect_point(t*ux + camx,t*uy+ camy,t*uz + camz,(int)imageRGB.data[3*(yIm*cWidth+xIm)],(int)imageRGB.data[3*(yIm*cWidth+xIm)+1],(int)imageRGB.data[3*(yIm*cWidth+xIm)+2]);
					else{
						int r,g,b;
						if (colour==1){        // red
							r=255;
							g=0;
							b=0;
						}
						else if (colour==2){   // blue
							r=0;
							g=0;
							b=255;
						}
						else{                  // dark green fallback
							r=0;
							g=1;
							b=0;
						}
						world->add_kinect_point(t*ux + camx,t*uy+ camy,t*uz + camz,r,g,b);
					}
				}
			}
		}
}
コード例 #5
0
unsigned int FaceDetector::detectFaceCamShift(cv::Mat img)
{
	/*
	 * Re-locate the previously detected face in `img` using CAM Shift on
	 * a hue back-projection restricted to skin-plausible pixels.
	 * Returns 0 on success (detected_face_roi_, detected_face_ and
	 * face_detected_bool_ are updated) and 1 when tracking is rejected
	 * (bad dimensions, low mean score, or too large a jump in position
	 * or size).
	 */

	/* Variables for CAM Shift */
	cv::Mat hsv = cv::Mat(img.size(), CV_8UC3 );
	cv::Mat mask = cv::Mat(img.size(), CV_8UC1);
	cv::Mat grayscale;
	cv::Mat backproject = cv::Mat( img.size(), CV_8UC1 );
	cv::Mat histimg = cv::Mat::zeros( img.size(), CV_8UC3);
	/* `hsv` is constructed as CV_8UC3, so it always has exactly 3 planes.
	 * A fixed-size array replaces the original `cv::Mat hue[hsv.channels()]`,
	 * which was a variable-length array — not standard C++. */
	cv::Mat hue[3];
	/* NOTE(review): `hist` is function-local, so when track_object_ is
	 * already set the back-projection below runs on a freshly-constructed
	 * (empty) histogram — presumably `hist` was meant to persist between
	 * calls (e.g. as a member); confirm against the caller's usage of
	 * track_object_. */
	cv::MatND hist;
	int vmin = 10, vmax = 256, smin = 30;
	int n = 16;                        // number of hue histogram bins
	float hranges_arr[] = {0,180};     // hue range of OpenCV 8-bit HSV
	const float* hranges = hranges_arr;
	int channels[]={0};
	cv::Rect track_window, face_roi;
	cv::RotatedRect track_box;

	int mean_score_threshold = 70; //Minimum value of mean score of the CAM Shift. Value between 0 and 100
	float max_face_deslocation = detected_face_roi_.height/3.;

	unsigned int max_distance_width = detected_face_roi_.width/3.;
	unsigned int max_distance_height = detected_face_roi_.height/3.;

	/*********************/


	cv::cvtColor( img, hsv, CV_RGB2HSV );
	cv::cvtColor(img, grayscale, CV_RGB2GRAY);
	/* Keep only pixels whose saturation/value fall in the skin-plausible range. */
	cv::inRange(hsv, cv::Scalar(0,smin,MIN(vmin,vmax),0),cv::Scalar(180,256,MAX(vmin,vmax),0), mask );
	split( hsv,hue);
	face_roi = detected_face_roi_;


	if(!track_object_) //Select a region with the face color skin and calc histogram
	{
		double max_val = 0.0;

		//ROI selection is a bit different now and uses operator ()

		/*
		 * little_face is a ROI that contains exclusively the color skin
		 */
		cv::Rect little_face;

		little_face = face_roi;

		/* Shrink the ROI by one third and re-centre it so it stays well
		 * inside the face. */
		little_face.width = face_roi.width -face_roi.width/3;
		little_face.height = face_roi.height -face_roi.height/3;
		little_face.x += face_roi.width/(2.*3.);
		little_face.y += face_roi.height/(2.*3.);

		cv::Mat hueRoi=hue[0](little_face);
		cv::Mat maskRoi=mask(little_face);
		cv::calcHist( &hueRoi,1,channels, maskRoi , hist, 1 ,  &n  ,  &hranges, true, 0 );
		cv::minMaxLoc(hist, 0, &max_val, 0, 0);

		/* Normalise the histogram so its maximum bin equals 255. */
		float scale =max_val ? 255. / max_val : 0.;
		hist.convertTo(hist,hist.type(),scale,0);
		track_window = face_roi;
		histimg.setTo(0);
	}


	cv::calcBackProject(hue,1,channels,hist,backproject,&hranges,1,true);
	cv::bitwise_and( backproject, mask, backproject);

	//Use camshift over computed histogram
	track_box = cv::CamShift( backproject, track_window, cv::TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));


	if(track_box.size.height>0 && track_box.size.width > 0
			&& track_box.center.x>0 && track_box.center.y >0)
		face_roi = track_box.boundingRect();

	/* Force the ROI to keep the expected face aspect ratio. */
	face_roi.height = face_ratio_*face_roi.width;

	/* Clamp the ROI to the image bounds. */
	face_roi.x = min(face_roi.x, img.cols);
	face_roi.y = min(face_roi.y, img.rows);

	face_roi.width = min(img.cols-face_roi.x-3, face_roi.width);
	face_roi.height = min(img.rows-face_roi.y-3, face_roi.height);


	face_roi.height = max(face_roi.height, 0);
	face_roi.width = max(face_roi.width, 0);

	face_roi.x = max(face_roi.x, 0);
	face_roi.y = max(face_roi.y, 0);


	if(face_roi.height <= 0 || face_roi.width <= 0) //If face ROI has invalid dimensions
		return 1;

	/*
	 * This is used to check mean score of CAM shift selected ROI
	 */
	double mean_score = 0;

	if(face_roi.x > 0 && face_roi.y > 0 && face_roi.width>20 && face_roi.height>20)
	{
		cv::Mat temp = backproject(face_roi);
		for(int r = 0; r < temp.rows; r++)
			for(int c = 0; c < temp.cols; c++)
				mean_score+= temp.at<unsigned char>(r,c);

		mean_score = mean_score/double(temp.rows*temp.cols);
	}


	if(mean_score<mean_score_threshold) //Let's see if CAM Shift respects mean score threshold
	{
		face_detected_bool_ = false;
		return 1;
	}

	//If face position have moved considerably, ignore CAM shift
	if(euclidDist(cv::Point2f(face_roi.x + face_roi.width/2.,face_roi.y+face_roi.height/2.),
			cv::Point2f(detected_face_roi_.x+detected_face_roi_.width/2,detected_face_roi_.y+detected_face_roi_.height/2.)) > max_face_deslocation)
	{
		face_detected_bool_ = false;

		return 1;
	}


	//This is to avoid big increases in the size of the detected_face_roi
	if(abs(face_roi.width - detected_face_roi_.width)> max_distance_width
			|| abs(face_roi.height - detected_face_roi_.height)> max_distance_height)
	{
		/* Keep the previous size, centred on the new position. */
		face_roi.x = face_roi.x + face_roi.width/2. -detected_face_roi_.width/2.;
		face_roi.y = face_roi.y + face_roi.height/2. -detected_face_roi_.height/2.;

		face_roi.width = detected_face_roi_.width;
		face_roi.height = detected_face_roi_.height;

	}

	//Check if Face ROI respects image's dimensions
	if(face_roi.x<0 || face_roi.y < 0
	|| (face_roi.x+face_roi.width)>= img.cols || (face_roi.y+face_roi.height)>= img.rows)
		return 1;


	//CAM Shift have succesfully found face
	detected_face_roi_ = face_roi;
	detected_face_ = img(detected_face_roi_);
	face_detected_bool_ = true;
	return 0;
}