Example #1
File: main.cpp Project: nem0301/Tetris
int main(int argc, char* argv[]) {

	int width = SIZE * WIDTH;
	int height = SIZE * HEIGHT;
	int size = SIZE;

	Mat img(height, width, CV_8UC3, Scalar(0));
	namedWindow("Display Image", WINDOW_AUTOSIZE);

	initialPlate(plate);
	while (true) {

		LBlock b(Point2f(WIDTH / 2, 3), plate);

		Block& block = b;

		Point2f point(0, 0);

		time_t tick, tock;
		time(&tick);
		while (true) {
			//image initialize by black
			img = Mat::zeros(height, width, CV_8UC3);

			//draw whole plate
			for (int y = 0; y < HEIGHT; y++) {
				for (int x = 0; x < WIDTH; x++) {
					int temp = y * WIDTH + x;
					if (plate[temp] != 0) {
						Point2f tempPoint1(x * size, y * size);
						Point2f tempPoint2(x * size + size - 1,
								y * size + size - 1);
						Rect rect(tempPoint1, tempPoint2);
						rectangle(img, rect, Scalar(255, 255, 255), -1);
					}
				}
			}

			//get input
			int input = cvWaitKey(1);
			if (input == 65361) {		//left
				block.move(-1);
			} else if (input == 65362) {	//up
				block.rotate();
			} else if (input == 65363) {	//right
				block.move(1);
			} else if (input == 65364) {	//down
				block.move(0);
			} else if (input == 32) {		//space
				block.moveFullDown();
				checkLines(plate);
				break;
			} else if (input == 27)		//esc
				return 0;

			time(&tock);
			if (tock - tick > 0) {
				int deadOrNot = block.move(0);
				tick = tock;
				if (deadOrNot == -1) {
					break;
				}
			}

			imshow("Display Image", img);

		}
	}

	destroyAllWindows();
	cvWaitKey();
	return 0;
}
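The raw key codes above (65361-65364) are the GTK/X11 arrow-key symbols, so this input handling is backend specific. A minimal sketch of a portable mapping, assuming OpenCV >= 3.2 for cv::waitKeyEx; the Windows arrow codes listed are an assumption to verify on the target platform:

// Sketch: normalize arrow-key codes across highgui backends.
enum Key { KEY_NONE, KEY_LEFT, KEY_UP, KEY_RIGHT, KEY_DOWN, KEY_SPACE, KEY_ESC };

static Key readKey(int delayMs) {
	int k = cv::waitKeyEx(delayMs);
	switch (k) {
	case 65361: case 2424832: return KEY_LEFT;   // GTK / Windows (assumed)
	case 65362: case 2490368: return KEY_UP;
	case 65363: case 2555904: return KEY_RIGHT;
	case 65364: case 2621440: return KEY_DOWN;
	case 32: return KEY_SPACE;
	case 27: return KEY_ESC;
	default: return KEY_NONE;
	}
}

Example #2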
int main(int argc, char* argv[])
{
	// welcome message
	std::cout<<"*********************************************************************************"<<std::endl;
	std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
	std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
	std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
	std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
	std::cout<<"* The retina model still have the following properties:"<<std::endl;
	std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
	std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
	std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
	std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
	std::cout<<"* for more information, reer to the following papers :"<<std::endl;
	std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
	std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
	std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
	std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
	std::cout<<"*********************************************************************************"<<std::endl;
	std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
	std::cout<<"*********************************************************************************"<<std::endl;
	std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
	std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
	std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
	std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
	std::cout<<"*********************************************************************************"<<std::endl;

	// basic input arguments checking
	if (argc<2)
	{
		help("bad number of parameter");
		return -1;
	}

	bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
	int chosenMethod=0;
	if (!strcmp(argv[argc-1], "fast"))
	{
		chosenMethod=1;
		std::cout<<"Using fast method (no spectral whithning), adaptation of Meylan&al 2008 method"<<std::endl;
	}

	std::string inputImageName=argv[1];

	//////////////////////////////////////////////////////////////////////////////
	// checking input media type (still image, video file, live video acquisition)
	std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
	// image processing case
	// declare the retina input buffer... it will be fed differently depending on the input media
	inputImage = cv::imread(inputImageName, -1); // flag -1: load the image unchanged (keeps depth/channels, needed for EXR input)
	std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
	if (!inputImage.total())
	{
		help("could not load image, program end");
		return -1;
	}
	// rescale between 0 and 1
	normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
	cv::Mat gammaTransformedImage;
	cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
	imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
	imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
	if (inputImage.empty())
	{
		help("Input image could not be loaded, aborting");
		return -1;
	}

	//////////////////////////////////////////////////////////////////////////////
	// Program start in a try/catch safety context (Retina may throw errors)
	try
	{
		/* create a retina instance with default parameters setup, uncomment the initialisation you want to test
		 * -> if the last parameter is 'log', then activate log sampling (favours foveal vision and subsamples peripheral vision)
		 */
		if (useLogSampling)
		{
			retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
		}
		else// -> else allocate "classical" retina :
			retina = cv::bioinspired::createRetina(inputImage.size());

		// create a fast retina tone mapper (Meylan et al. algorithm)
		std::cout<<"Allocating fast tone mapper..."<<std::endl;
		//cv::Ptr<cv::RetinaFastToneMapping> fastToneMapper=createRetinaFastToneMapping(inputImage.size());
		std::cout<<"Fast tone mapper allocated"<<std::endl;

		// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
		retina->write("RetinaDefaultParameters.xml");

		// deactivate magnocellular pathway processing (motion information extraction) since it is not useful here
		retina->activateMovingContoursProcessing(false);

		// declare retina output buffers
		cv::Mat retinaOutput_parvo;

		/////////////////////////////////////////////
		// prepare displays and interactions
		histogramClippingValue=0; // default value... updated with interface slider
		//inputRescaleMat = inputImage;
		//outputRescaleMat = imageInputRescaled;
		cv::namedWindow("Processing configuration",1);
		cv::createTrackbar("histogram edges clipping limit", "Processing configuration",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);

		colorSaturationFactor=3;
		cv::createTrackbar("Color saturation", "Processing configuration", &colorSaturationFactor,5,callback_saturateColors);

		retinaHcellsGain=40;
		cv::createTrackbar("Hcells gain", "Processing configuration",&retinaHcellsGain,100,callBack_updateRetinaParams);

		localAdaptation_photoreceptors=197;
		localAdaptation_Gcells=190;
		cv::createTrackbar("Ph sensitivity", "Processing configuration", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
		cv::createTrackbar("Gcells sensitivity", "Processing configuration", &localAdaptation_Gcells,199,callBack_updateRetinaParams);


		/////////////////////////////////////////////
		// apply default parameters of user interaction variables
		rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100);
		retina->setColorSaturation(true,(float)colorSaturationFactor);
		callBack_updateRetinaParams(1,NULL); // first call for default parameters setup

		// processing loop with stop condition
		bool continueProcessing=true;
		while(continueProcessing)
		{
			// run retina filter
			if (!chosenMethod)
			{
				retina->run(imageInputRescaled);
				// Retrieve and display retina output
				retina->getParvo(retinaOutput_parvo);
				cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
				cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
				cv::imwrite("HDRinput.jpg",imageInputRescaled/255.0);
				cv::imwrite("RetinaToneMapping.jpg",retinaOutput_parvo);
			}
			else
			{
				// apply the simplified hdr tone mapping method
				cv::Mat fastToneMappingOutput;
				retina->applyFastToneMapping(imageInputRescaled, fastToneMappingOutput);
				cv::imshow("Retina fast tone mapping output : 16bit=>8bit image retina tonemapping", fastToneMappingOutput);
			}
			/*cv::Mat fastToneMappingOutput_specificObject;
             fastToneMapper->setup(3.f, 1.5f, 1.f);
             fastToneMapper->applyFastToneMapping(imageInputRescaled, fastToneMappingOutput_specificObject);
             cv::imshow("### Retina fast tone mapping output : 16bit=>8bit image retina tonemapping", fastToneMappingOutput_specificObject);
			 */
			cv::waitKey(10);
		}
	}catch(const cv::Exception &e)
	{
		std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
	}

	// Program end message
	std::cout<<"Retina demo end"<<std::endl;

	return 0;
}
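Note: the sample above calls a help() routine that is not included in this snippet. A minimal sketch of what it presumably looks like (the usage text is an assumption based on the arguments parsed in main):

static void help(const std::string &errorMessage)
{
	std::cout << "Program init error : " << errorMessage << std::endl;
	std::cout << "Usage : ./retinaDemo <inputImage> [log|fast]" << std::endl;
	std::cout << "   'log'  : activate retina log sampling (favours foveal vision)" << std::endl;
	std::cout << "   'fast' : use the fast tone mapping method (Meylan et al. 2008)" << std::endl;
}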
Example #3
//Perform background subtraction on two cameras in a sequential way
void depthBackgroundSub_Seq(KinectSensor* cam1,  KinectSensor* cam2)
{
	const char* windName_1 = "BackgroundSub 1";
	const char* windName_2 = "BackgroundSub 2";
	Mat backImg1(Size(XN_VGA_X_RES, XN_VGA_Y_RES), CV_8UC1);
	Mat backImg2(Size(XN_VGA_X_RES, XN_VGA_Y_RES), CV_8UC1);
	namedWindow(windName_1);
	namedWindow(windName_2);

	cam1->startDevice();
	cam2->startDevice();

	bool stop = false;
	bool firstTime = true;

	int total = XN_VGA_Y_RES*XN_VGA_X_RES;
	BackgroundDepthSubtraction *subtractor1 = NULL, *subtractor2 = NULL;
	//allocate enough memory in advance (% of the total points)
	XnPoint3D* points2D_1 = new XnPoint3D[MAX_FORGROUND_POINTS];	
	XnPoint3D* points2D_2 = new XnPoint3D[MAX_FORGROUND_POINTS];	
	int numPoints_1 = 0;
	int numPoints_2 = 0;
	int contFrames = 0;

//	unsigned short depth[MAX_DEPTH];
//	unsigned short depth2[MAX_DEPTH];
	char *depth_data, *depth_data2;
	while (!stop)
	{
		//wait for the next frame to be ready
		cam1->waitAndUpdate();
		cam2->waitAndUpdate();
		//recover the depth map
		const XnDepthPixel* dm1 = cam1->getDepthMap();
		const XnDepthPixel* dm2 = cam2->getDepthMap();


		//ptime time_start_wait(microsec_clock::local_time());
		if (contFrames == 0)//store the background model
		{
			subtractor1 = new BackgroundDepthSubtraction(dm1);
			subtractor2 = new BackgroundDepthSubtraction(dm2);
		}
		else 
		{
			numPoints_1 = subtractor1->subtraction(points2D_1, dm1); //returns the number of foreground points
			numPoints_2 = subtractor2->subtraction(points2D_2, dm2); //returns the number of foreground points
		}
		
		//ptime time_end_wait(microsec_clock::local_time());
		//time_duration duration_wait(time_end_wait - time_start_wait);
		//(*outDebug) << "Time report(bgs 1_2): " << duration_wait.total_microseconds() << endl;
		
		Utils::initMat1u(backImg1, 0);
		Utils::initMat1u(backImg2, 0);
		subtractor1->createBackImage(points2D_1, backImg1, numPoints_1);
		subtractor2->createBackImage(points2D_2, backImg2, numPoints_2);

		imshow(windName_1, backImg1);
		imshow(windName_2, backImg2);
		////display image
		char c = cvWaitKey(1);
		stop = (c == 27) || (contFrames == 250);
		
		//for recorded videos
	//	if (cam2->getDepthNode()->GetFrameID() == 1)
	//		if (firstTime ? firstTime = false : stop = true);

		contFrames++;
	}
	//ptime time_end(microsec_clock::local_time());
	//time_duration duration(time_end - time_start);
	//double totalSecs = duration.total_microseconds()/1000000;
	//double fps = contFrames/totalSecs;
	//cout << "Fps: " << fps << endl;

	cam1->stopDevice();
	cam2->stopDevice();

	//free memory
	delete [] points2D_1;
	delete [] points2D_2;
	delete(subtractor1);
	delete(subtractor2);
}
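BackgroundDepthSubtraction is not shown here. Judging from the comments, its constructor stores the first depth map as the background model and subtraction() writes the foreground pixels into the preallocated point array. A per-pixel sketch under those assumptions (the 70 mm threshold is illustrative, not taken from the project):

#include <cstdlib> // abs

// Sketch: a pixel is foreground when its depth is valid and differs from the
// stored background depth by more than a threshold (OpenNI depth is in mm).
int subtractionSketch(const XnDepthPixel* background, const XnDepthPixel* current,
                      XnPoint3D* foreground, int maxPoints, int thresholdMm = 70)
{
	int n = 0;
	for (int y = 0; y < XN_VGA_Y_RES && n < maxPoints; y++)
		for (int x = 0; x < XN_VGA_X_RES && n < maxPoints; x++)
		{
			int idx = y * XN_VGA_X_RES + x;
			int diff = (int)current[idx] - (int)background[idx];
			if (current[idx] != 0 && abs(diff) > thresholdMm)
			{
				foreground[n].X = (XnFloat)x;
				foreground[n].Y = (XnFloat)y;
				foreground[n].Z = (XnFloat)current[idx];
				n++;
			}
		}
	return n;
}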
Example #4
cv::Point findEyeCenter(cv::Mat face, cv::Rect eye, std::string debugWindow) {
    cv::Mat eyeROIUnscaled = face(eye);
    cv::Mat eyeROI;
    scaleToFastSize(eyeROIUnscaled, eyeROI);
    // draw eye region
    rectangle(face,eye,1234);
    //-- Find the gradient
    cv::Mat gradientX = computeMatXGradient(eyeROI);
    cv::Mat gradientY = computeMatXGradient(eyeROI.t()).t();
    //-- Normalize and threshold the gradient
    // compute all the magnitudes
    cv::Mat mags = matrixMagnitude(gradientX, gradientY);
    //compute the threshold
    double gradientThresh = computeDynamicThreshold(mags, kGradientThreshold);
    //double gradientThresh = kGradientThreshold;
    //double gradientThresh = 0;
    //normalize
    for (int y = 0; y < eyeROI.rows; ++y) {
        double *Xr = gradientX.ptr<double>(y), *Yr = gradientY.ptr<double>(y);
        const double *Mr = mags.ptr<double>(y);
        for (int x = 0; x < eyeROI.cols; ++x) {
            double gX = Xr[x], gY = Yr[x];
            double magnitude = Mr[x];
            if (magnitude > gradientThresh) {
                Xr[x] = gX/magnitude;
                Yr[x] = gY/magnitude;
            } else {
                Xr[x] = 0.0;
                Yr[x] = 0.0;
            }
        }
    }
    imshow(debugWindow,gradientX);
    //-- Create a blurred and inverted image for weighting
    cv::Mat weight;
    GaussianBlur( eyeROI, weight, cv::Size( kWeightBlurSize, kWeightBlurSize ), 0, 0 );
    for (int y = 0; y < weight.rows; ++y) {
        unsigned char *row = weight.ptr<unsigned char>(y);
        for (int x = 0; x < weight.cols; ++x) {
            row[x] = (255 - row[x]);
        }
    }
    //imshow(debugWindow,weight);
    //-- Run the algorithm!
    cv::Mat outSum = cv::Mat::zeros(eyeROI.rows,eyeROI.cols,CV_64F);
    // for each possible gradient location
    // Note: these loops are reversed from the way the paper does them
    // it evaluates every possible center for each gradient location instead of
    // every possible gradient location for every center.
    printf("Eye Size: %ix%i\n",outSum.cols,outSum.rows);
    for (int y = 0; y < weight.rows; ++y) {
        const unsigned char *Wr = weight.ptr<unsigned char>(y);
        const double *Xr = gradientX.ptr<double>(y), *Yr = gradientY.ptr<double>(y);
        for (int x = 0; x < weight.cols; ++x) {
            double gX = Xr[x], gY = Yr[x];
            if (gX == 0.0 && gY == 0.0) {
                continue;
            }
            testPossibleCentersFormula(x, y, Wr[x], gX, gY, outSum);
        }
    }
    // scale all the values down, basically averaging them
    double numGradients = (weight.rows*weight.cols);
    cv::Mat out;
    outSum.convertTo(out, CV_32F,1.0/numGradients);
    //imshow(debugWindow,out);
    //-- Find the maximum point
    cv::Point maxP;
    double maxVal;
    cv::minMaxLoc(out, NULL,&maxVal,NULL,&maxP);
    //-- Flood fill the edges
    if(kEnablePostProcess) {
        cv::Mat floodClone;
        //double floodThresh = computeDynamicThreshold(out, 1.5);
        double floodThresh = maxVal * kPostProcessThreshold;
        cv::threshold(out, floodClone, floodThresh, 0.0f, cv::THRESH_TOZERO);
        if(kPlotVectorField) {
            //plotVecField(gradientX, gradientY, floodClone);
            imwrite("eyeFrame.png",eyeROIUnscaled);
        }
        cv::Mat mask = floodKillEdges(floodClone);
        //imshow(debugWindow + " Mask",mask);
        //imshow(debugWindow,out);
        // redo max
        cv::minMaxLoc(out, NULL,&maxVal,NULL,&maxP,mask);
    }
    return unscalePoint(maxP,eye);
}
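testPossibleCentersFormula is not part of this snippet. The function appears to implement the objective from Timm & Barth, "Accurate eye centre localisation by means of gradients": every candidate centre is scored by the squared dot product between the normalized displacement vector (centre to gradient location) and the gradient, weighted by the inverted eye image. A sketch under that assumption:

#include <cmath>
#include <algorithm>

void testPossibleCentersFormulaSketch(int x, int y, unsigned char weight,
                                      double gx, double gy, cv::Mat &out)
{
    // For the unit gradient (gx, gy) at (x, y), accumulate the score of every
    // possible centre (cx, cy) in the output map.
    for (int cy = 0; cy < out.rows; ++cy) {
        double *Or = out.ptr<double>(cy);
        for (int cx = 0; cx < out.cols; ++cx) {
            if (x == cx && y == cy) continue;        // zero-length displacement
            double dx = x - cx, dy = y - cy;
            double mag = std::sqrt(dx * dx + dy * dy);
            dx /= mag; dy /= mag;                    // normalize the displacement
            double dot = std::max(0.0, dx * gx + dy * gy);
            Or[cx] += dot * dot * weight;            // darker pixels weigh more
        }
    }
}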
Example #5
void displayImage(Config* config, string windowName, cv::Mat frame)
{
  if (config->debugShowImages)
    imshow(windowName, frame);
}
void OpenniFilter::cloud_cb_ (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &cloud)
{
    if (!viewer.wasStopped())
    {
        if (cloud->isOrganized())
        {
            // initialize all the Mats to store intermediate steps
            int cloudHeight = cloud->height;
            int cloudWidth = cloud->width;
            rgbFrame = Mat(cloudHeight, cloudWidth, CV_8UC3);
            drawing = Mat(cloudHeight, cloudWidth, CV_8UC3);
            grayFrame = Mat(cloudHeight, cloudWidth, CV_8UC1);
            hsvFrame = Mat(cloudHeight, cloudWidth, CV_8UC3);
            contourMask = Mat(cloudHeight, cloudWidth, CV_8UC1);

            if (!cloud->empty())
            {
                for (int h = 0; h < rgbFrame.rows; h ++)
                {
                    for (int w = 0; w < rgbFrame.cols; w++)
                    {
                        pcl::PointXYZRGBA point = cloud->at(w, cloudHeight-h-1);
                        Eigen::Vector3i rgb = point.getRGBVector3i();
                        rgbFrame.at<Vec3b>(h,w)[0] = rgb[2];
                        rgbFrame.at<Vec3b>(h,w)[1] = rgb[1];
                        rgbFrame.at<Vec3b>(h,w)[2] = rgb[0];
                    }
                }

                // do the filtering 
                int xPos = 0;
                int yPos = 0;
                mtx.lock();
                xPos = mouse_x;
                yPos = mouse_y;
                mtx.unlock();

                // color filtering based on what is chosen by users
                cvtColor(rgbFrame, hsvFrame, CV_BGR2HSV); // rgbFrame was filled in BGR channel order above
                Vec3b pixel = hsvFrame.at<Vec3b>(yPos, xPos); // Mat::at is (row, col), i.e. (y, x)

                // OpenCV stores 8-bit hue in [0,180); clamp the window at the range edges
                int hueLow = pixel[0] < iHueDev ? 0 : pixel[0] - iHueDev;
                int hueHigh = pixel[0] > 179 - iHueDev ? 179 : pixel[0] + iHueDev;
                // inRange(hsvFrame, Scalar(hueLow, pixel[1]-20, pixel[2]-20), Scalar(hueHigh, pixel[1]+20, pixel[2]+20), grayFrame);
                inRange(hsvFrame, Scalar(hueLow, iLowS, iLowV), Scalar(hueHigh, iHighS, iHighV), grayFrame);

                // removes small objects from the foreground by morphological opening
                erode(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));
                dilate(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));

                // morphological closing (removes small holes from the foreground)
                dilate(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));
                erode(grayFrame, grayFrame, getStructuringElement(MORPH_ELLIPSE, Size(5,5)));

                // gets contour from the grayFrame and keeps the largest contour
                Mat cannyOutput;
                vector<vector<Point> > contours;
                vector<Vec4i> hierarchy;
                int thresh = 100;
                Canny(grayFrame, cannyOutput, thresh, thresh * 2, 3);
                findContours(cannyOutput, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
                int defaultContourArea = 1000; // 1000 seems to work fine in most cases... cannot prove this
                vector<vector<Point> > newContours;
                for (size_t i = 0; i < contours.size(); i++)
                {
                    double area = contourArea(contours[i], false);
                    if (area > defaultContourArea)
                        newContours.push_back(contours[i]);
                }

                // draws the kept contours:
                drawing = Mat::zeros(cannyOutput.size(), CV_8UC3);
                for (size_t i = 0; i < newContours.size(); i++)
                    drawContours(drawing, newContours, (int)i, Scalar(255, 255, 255), CV_FILLED, 8, noArray(), 0, Point());

                // gets the filter by setting everything within the contour to be 1. 
                inRange(drawing, Scalar(1, 1, 1), Scalar(255, 255, 255), contourMask);

                // filters the point cloud based on contourMask
                // again go through the point cloud and filter out unnecessary points
                pcl::PointCloud<pcl::PointXYZRGBA>::Ptr resultCloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
                pcl::PointXYZRGBA newPoint;
                for (int h = 0; h < contourMask.rows; h ++)
                {
                    for (int w = 0; w < contourMask.cols; w++)
                    {
                        if (contourMask.at<uchar>(h,w) > 0)
                        {
                            newPoint = cloud->at(w,h);
                            resultCloud->push_back(newPoint);
                        }
                    }
                }

                if (xPos == 0 && yPos == 0)
                    viewer.showCloud(cloud);
                else
                    viewer.showCloud(resultCloud);
                
                imshow("tracker", rgbFrame);
                imshow("filtered result", contourMask);
                char key = waitKey(1);
                if (key == 27) 
                {
                    interface->stop();
                    return;
                }
            }
            else
                cout << "Warning: Point Cloud is empty" << endl;
        }
        else
            cout << "Warning: Point Cloud is not organized" << endl;
    }
}
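One caveat in the hue windowing above: OpenCV stores 8-bit hue in [0,180), and red hues sit at both ends of that range, so a window near an edge should wrap around rather than clamp. A sketch of wrap-aware masking, reusing the saturation/value bounds assumed in the snippet (iLowS, iHighS, iLowV, iHighV):

// Sketch: build a hue mask that wraps around the 0/179 boundary.
cv::Mat hueMaskSketch(const cv::Mat &hsv, int hue, int iHueDev,
                      int iLowS, int iHighS, int iLowV, int iHighV)
{
    cv::Mat mask, wrap;
    int lo = hue - iHueDev, hi = hue + iHueDev;
    if (lo < 0) {            // window wraps below 0
        cv::inRange(hsv, cv::Scalar(0, iLowS, iLowV), cv::Scalar(hi, iHighS, iHighV), mask);
        cv::inRange(hsv, cv::Scalar(180 + lo, iLowS, iLowV), cv::Scalar(179, iHighS, iHighV), wrap);
        mask |= wrap;
    } else if (hi > 179) {   // window wraps above 179
        cv::inRange(hsv, cv::Scalar(lo, iLowS, iLowV), cv::Scalar(179, iHighS, iHighV), mask);
        cv::inRange(hsv, cv::Scalar(0, iLowS, iLowV), cv::Scalar(hi - 180, iHighS, iHighV), wrap);
        mask |= wrap;
    } else {
        cv::inRange(hsv, cv::Scalar(lo, iLowS, iLowV), cv::Scalar(hi, iHighS, iHighV), mask);
    }
    return mask;
}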
Example #7
bool pointerNextFrame(int &xInOut, int &yInOut, int &zInOut)
{
	rs::device & dev = *app_state.dev;
	if (dev.is_streaming()) dev.wait_for_frames();

	auto t1 = std::chrono::high_resolution_clock::now();
	nexttime += std::chrono::duration<float>(t1 - t0).count();
	t0 = t1;
	++frames;
	if (nexttime > 0.5f)
	{
		fps = frames / nexttime;
		frames = 0;
		nexttime = 0;
	}

	const rs::stream tex_stream = app_state.tex_streams[app_state.index];
	app_state.depth_scale = dev.get_depth_scale();
	app_state.extrin = dev.get_extrinsics(rs::stream::depth, tex_stream);
	app_state.depth_intrin = dev.get_stream_intrinsics(rs::stream::depth);
	app_state.tex_intrin = dev.get_stream_intrinsics(tex_stream);
	app_state.identical = app_state.depth_intrin == app_state.tex_intrin && app_state.extrin.is_identity();

	// setup the OpenCV Mat structures
	cv::Mat depth16(app_state.depth_intrin.height, app_state.depth_intrin.width, CV_16U, (uchar *)dev.get_frame_data(rs::stream::depth));
	
	rs::intrinsics color_intrin = dev.get_stream_intrinsics(rs::stream::color);
	cv::Mat rgb(color_intrin.height, color_intrin.width, CV_8UC3, (uchar *)dev.get_frame_data(rs::stream::color));

	// ignore depth greater than 800 mm.
	depth16.setTo(10000, depth16 > 800);
	depth16.setTo(10000, depth16 == 0);
	cv::Mat depth8u = depth16 < 800;

	cv::Point handPoint(0, 0);
	for (int y = 0; y < depth8u.rows; ++y)
	{
		uchar *d = depth8u.row(y).ptr();
		for (int x = 0; x < depth8u.cols; ++x)
		{
			if (d[x])
			{
				int floodCount = cv::floodFill(depth8u, cv::Point(x, y), 255);
				// we have found the topmost point
				if (floodCount > 100)
				{
					handPoint = cv::Point(x, y);
					break;
				}
			}
		}
		if (handPoint != cv::Point(0, 0)) break;
	}

	if (handPoint != cv::Point(0, 0))
		cv::circle(depth8u, handPoint, 10, 128, cv::FILLED);
	imshow("depth8u", depth8u);
	cv::cvtColor(rgb, rgb, cv::COLOR_BGR2RGB);
	imshow("rgb", rgb);
	xInOut = handPoint.x;
	yInOut = handPoint.y;
	zInOut = 0; // not using this yet.
	if (handPoint == cv::Point(0, 0)) return false;
	return true;
}
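The scan above leans on a useful property of cv::floodFill: it returns the pixel count of the connected component grown from the seed, so the first foreground seed whose component exceeds 100 px is accepted as the topmost hand point. The idiom in isolation:

// Sketch: flood-fill blob test. floodFill repaints the component containing
// 'seed' and returns its area in pixels.
bool isBigBlobAt(cv::Mat &mask, cv::Point seed, int minArea = 100)
{
	if (mask.at<uchar>(seed) == 0) return false; // seed must be foreground
	int area = cv::floodFill(mask, seed, 255);
	return area > minArea;
}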
void rgb_pcl::cloud_cb (const sensor_msgs::PointCloud2ConstPtr& input){
	
	// Container for original & filtered data
#if PR2
	if(target_frame.find(base_frame) == std::string::npos){
		getTransformCloud(input, *input);
	}
	sensor_msgs::PointCloud2 in = *input;
	sensor_msgs::PointCloud2 out;
	pcl_ros::transformPointCloud(target_frame, net_transform, in, out);
#endif
	
// 	ROS_INFO("Cloud acquired...");
	
	pcl::PCLPointCloud2* cloud = new pcl::PCLPointCloud2; 
	pcl::PCLPointCloud2ConstPtr cloudPtr(cloud);
	pcl::PCLPointCloud2::Ptr cloud_filtered_blob (new pcl::PCLPointCloud2);
				  
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZRGB>), 
						 cloud_p (new pcl::PointCloud<pcl::PointXYZRGB>), 
						 cloud_f (new pcl::PointCloud<pcl::PointXYZRGB>);
						 
	Mat displayImage = cv::Mat(Size(640, 480), CV_8UC3);
	displayImage = Scalar(120);

	// Convert to PCL data type
#if PR2
	pcl_conversions::toPCL(out, *cloud);
#endif
#if !PR2
	pcl_conversions::toPCL(*input, *cloud);
#endif
// 	ROS_INFO("\t=>Cloud rotated...");

	// Perform the actual filtering
	pcl::VoxelGrid<pcl::PCLPointCloud2> sor;
	sor.setInputCloud (cloudPtr);
	sor.setLeafSize (0.005, 0.005, 0.005);
	sor.filter (*cloud_filtered_blob);
	
	pcl::fromPCLPointCloud2 (*cloud_filtered_blob, *cloud_filtered);

	ModelCoefficientsPtr coefficients (new pcl::ModelCoefficients);
	
	PointCloudPtr plane_points(new PointCloud), point_points_2d_hull(new PointCloud);
		
	std::vector<PointCloudPtr> object_clouds;
	pcl::PointCloud<pcl::PointXYZRGB> combinedCloud;
	
#if PR2
	make_crop_box_marker(marker_publisher, base_frame, 0, 0.2, -1, 0.2, 1.3, 2, 1.3);
// 	Define your cube with two points in space: 
	Eigen::Vector4f minPoint; 
	minPoint[0]=0.2;  // define minimum point x 
	minPoint[1]=-1;  // define minimum point y 
	minPoint[2]=0.2;  // define minimum point z 
	Eigen::Vector4f maxPoint; 
	maxPoint[0]=1.5;  // define max point x 
	maxPoint[1]=1;  // define max point y 
	maxPoint[2]=1.5;  // define max point z 

	pcl::CropBox<pcl::PointXYZRGB> cropFilter; 
	cropFilter.setInputCloud (cloud_filtered); 
	cropFilter.setMin(minPoint); 
	cropFilter.setMax(maxPoint); 

   	cropFilter.filter (*cloud_filtered); 
#endif
	
#if !PR2
	//Rotate the point cloud
	Eigen::Affine3f transform_1 = Eigen::Affine3f::Identity();

	// Define a rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix)
	float theta = M_PI; // The angle of rotation in radians

	// Define a translation of 1.0 meter on the z axis
	transform_1.translation() << 0.0, 0.0, 1.0;

	// The same rotation matrix as before; theta radians around the X axis
	transform_1.rotate (Eigen::AngleAxisf (theta, Eigen::Vector3f::UnitX()));

	// Executing the transformation
	pcl::transformPointCloud (*cloud_filtered, *cloud_filtered, transform_1);
#endif
	
	interpretTableScene(cloud_filtered, coefficients, plane_points, point_points_2d_hull, object_clouds);
	
	int c = 0;
#if PUBLISH_CLOUDS
	int ID_object = -1;
#endif
	for(auto cloudCluster: object_clouds){
// 		get_cloud_matching(cloudCluster); //histogram matching
	
#if PUBLISH_CLOUDS
		ID_object = c;
#endif
		combinedCloud += *cloudCluster;
		combinedCloud.header = cloud_filtered->header;
		c++;
	}
	
#if DISPLAY
	drawPointCloud(combinedCloud, displayImage);
#endif
	
	getTracker(object_clouds, displayImage);
	
	stateDetection();
// 	ROS_INFO("\t=>Cloud analysed...");
	
#if PUBLISH_CLOUDS
	
	sensor_msgs::PointCloud2 output;
	
	if(ID_object >= 0 && (int)object_clouds.size() > ID_object){
		pcl::toROSMsg(combinedCloud, output);
		// Publish the data
		pub.publish (output);
	}
	
#endif
	
	end = ros::Time::now();
	std::stringstream ss;
	ss <<(end-begin);
	string s_FPS = ss.str();
#if DISPLAY
	cv::putText(displayImage, "FPS: "+to_string((int)(1/stof(s_FPS))) + "   Desired: "+to_string(DESIRED_FPS), cv::Point(10, 10), CV_FONT_HERSHEY_COMPLEX, 0.4, Scalar(0,0,0));
	imshow("RGB", displayImage);
#endif
	waitKey(1);

	begin = ros::Time::now();
	
}
Example #9
int main(int argc, char **argv) {

    if (argc > 3) {
        std::cout << "Only the path of a SVO or a InitParams file can be passed in arg." << std::endl;
        return -1;
    }

    // Quick check input arguments
    bool readSVO = false;
    std::string SVOName;
    bool loadParams = false;
    std::string ParamsName;
    if (argc > 1) {
        std::string _arg;
        for (int i = 1; i < argc; i++) {
            _arg = argv[i];
            if (_arg.find(".svo") != std::string::npos) {
                // If an SVO is given we save its name
                readSVO = true;
                SVOName = _arg;
            }
            if (_arg.find(".ZEDinitParam") != std::string::npos) {
                // If a parameter file is given we save its name
                loadParams = true;
                ParamsName = _arg;
            }
        }
    }

    sl::zed::Camera* zed;

    if (!readSVO) // Live Mode
        zed = new sl::zed::Camera(sl::zed::HD720);
    else // SVO playback mode
        zed = new sl::zed::Camera(SVOName);

    // Define a struct of parameters for the initialization
    sl::zed::InitParams params;

    if (loadParams) // A parameters file was given in argument, we load it
        params.load(ParamsName);

    // Enables verbosity in the console
    params.verbose = true;


    sl::zed::ERRCODE err = zed->init(params);
    std::cout << "Error code : " << sl::zed::errcode2str(err) << std::endl;
    if (err != sl::zed::SUCCESS) {
        // Exit if an error occurred
        delete zed;
        return 1;
    }

    // Save the initialization parameters
    // The file can be used later in any zed based application
    params.save("MyParam");

    char key = ' ';
    int viewID = 0;
    int confidenceThres = 100;

    bool displayDisp = true;
    bool displayConfidenceMap = false;

    int width = zed->getImageSize().width;
    int height = zed->getImageSize().height;

    cv::Mat disp(height, width, CV_8UC4);
    cv::Mat anaglyph(height, width, CV_8UC4);
    cv::Mat confidencemap(height, width, CV_8UC4);

    cv::Size displaySize(720, 404);
    cv::Mat dispDisplay(displaySize, CV_8UC4);
    cv::Mat anaglyphDisplay(displaySize, CV_8UC4);
    cv::Mat confidencemapDisplay(displaySize, CV_8UC4);

    sl::zed::SENSING_MODE dm_type = sl::zed::STANDARD;

    // Mouse callback initialization
    sl::zed::Mat depth;
    zed->grab(dm_type);
    depth = zed->retrieveMeasure(sl::zed::MEASURE::DEPTH); // Get the pointer
    // Set the structure
    mouseStruct._image = cv::Size(width, height);
    mouseStruct._resize = displaySize;
    mouseStruct.data = (float*) depth.data;
    mouseStruct.step = depth.step;
    mouseStruct.name = "DEPTH";
    mouseStruct.unit = unit2str(params.unit);

    // The depth is clamped; the value is in the unit defined in zed::init() (10000 mm = 10 meters with the default unit)
    zed->setDepthClampValue(10000);

    // Create OpenCV Windows
    // NOTE: You may encounter an issue with OpenGL support, to solve it either
    // 	use the default rendering by removing ' | cv::WINDOW_OPENGL' from the flags
    //	or recompile OpenCV with OpenGL support (you may also need the gtk OpenGL Extension
    //	on Linux, provided by the packages libgtkglext1 libgtkglext1-dev)
    cv::namedWindow(mouseStruct.name, cv::WINDOW_AUTOSIZE | cv::WINDOW_OPENGL);
    cv::setMouseCallback(mouseStruct.name, onMouseCallback, (void*) &mouseStruct);
    cv::namedWindow("VIEW", cv::WINDOW_AUTOSIZE | cv::WINDOW_OPENGL);

    std::cout << "Press 'q' to exit" << std::endl;

    // Jetson only. Execute the calling thread on core 2
    sl::zed::Camera::sticktoCPUCore(2);

    sl::zed::ZED_SELF_CALIBRATION_STATUS old_self_calibration_status = sl::zed::SELF_CALIBRATION_NOT_CALLED;

    // Loop until 'q' is pressed
    while (key != 'q') {
        // Disparity Map filtering
        zed->setConfidenceThreshold(confidenceThres);

        // Get frames and launch the computation
        bool res = zed->grab(dm_type);

        if (!res) {
            if (old_self_calibration_status != zed->getSelfCalibrationStatus()) {
                std::cout << "Self Calibration Status : " << sl::zed::statuscode2str(zed->getSelfCalibrationStatus()) << std::endl;
                old_self_calibration_status = zed->getSelfCalibrationStatus();
            }

            depth = zed->retrieveMeasure(sl::zed::MEASURE::DEPTH); // Get the pointer

            // The following is the best way to retrieve a disparity map / image / confidence map as an OpenCV Mat.
            // If the buffer is not duplicated, it will be overwritten by the next retrieve (retrieveImage, normalizeMeasure, getView...)
            // Disparity, depth and confidence are 32F buffers by default and 8UC4 buffers in normalized format (displayable grayscale)


            // -- The next part is about displaying the data --

            // Normalize the disparity / depth map in order to use the full color range of gray level image
            if (displayDisp)
                slMat2cvMat(zed->normalizeMeasure(sl::zed::MEASURE::DISPARITY)).copyTo(disp);
            else
                slMat2cvMat(zed->normalizeMeasure(sl::zed::MEASURE::DEPTH)).copyTo(disp);

            // To get the depth at a given position, click on the disparity / depth map image
            cv::resize(disp, dispDisplay, displaySize);
            imshow(mouseStruct.name, dispDisplay);

            if (displayConfidenceMap) {
                slMat2cvMat(zed->normalizeMeasure(sl::zed::MEASURE::CONFIDENCE)).copyTo(confidencemap);
                cv::resize(confidencemap, confidencemapDisplay, displaySize);
                imshow("confidence", confidencemapDisplay);
            }

            // 'viewID' can be 'SIDE mode' or 'VIEW mode'
            if (viewID >= sl::zed::LEFT && viewID < sl::zed::LAST_SIDE)
                slMat2cvMat(zed->retrieveImage(static_cast<sl::zed::SIDE> (viewID))).copyTo(anaglyph);
            else
                slMat2cvMat(zed->getView(static_cast<sl::zed::VIEW_MODE> (viewID - (int) sl::zed::LAST_SIDE))).copyTo(anaglyph);

            cv::resize(anaglyph, anaglyphDisplay, displaySize);
            imshow("VIEW", anaglyphDisplay);

            key = cv::waitKey(5);

            // Keyboard shortcuts
            switch (key) {
                case 'b':
                    if (confidenceThres >= 10)
                        confidenceThres -= 10;
                    break;
                case 'n':
                    if (confidenceThres <= 90)
                        confidenceThres += 10;
                    break;
                    // From 'SIDE' enum
                case '0': // Left
                    viewID = 0;
                    std::cout << "Current View switched to Left (rectified/aligned)" << std::endl;
                    break;
                case '1': // Right
                    viewID = 1;
                    std::cout << "Current View switched to Right (rectified/aligned)" << std::endl;
                    break;
                    // From 'VIEW' enum
                case '2': // Side by Side
                    viewID = 10;
                    std::cout << "Current View switched to Side by Side mode" << std::endl;
                    break;
                case '3': // Overlay
                    viewID = 11;
                    std::cout << "Current View switched to Overlay mode" << std::endl;
                    break;
                case '4': // Difference
                    viewID = 9;
                    std::cout << "Current View switched to Difference mode" << std::endl;
                    break;
                case '5': // Anaglyph
                    viewID = 8;
                    std::cout << "Current View switched to Anaglyph mode" << std::endl;
                    break;
                case 'c':
                    displayConfidenceMap = !displayConfidenceMap;
                    break;
                case 's':
                    dm_type = (dm_type == sl::zed::SENSING_MODE::STANDARD) ? sl::zed::SENSING_MODE::FILL : sl::zed::SENSING_MODE::STANDARD;
                    std::cout << "SENSING_MODE " << sensing_mode2str(dm_type) << std::endl;
                    break;
                case 'd':
                    displayDisp = !displayDisp;
                    break;
            }
        } else key = cv::waitKey(5);
    }

    delete zed;
    return 0;
}
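slMat2cvMat is used throughout but not defined in this snippet; in ZED SDK 1.x samples it is typically a thin wrapper that aliases the sl::zed::Mat buffer as a cv::Mat without copying. A sketch under that assumption (the field and enum names follow SDK 1.x and should be checked against the installed headers):

cv::Mat slMat2cvMat(sl::zed::Mat &slMat)
{
    // Map the ZED data type to the matching OpenCV type, then wrap the buffer (no copy).
    int cvType = (slMat.data_type == sl::zed::DATA_TYPE::FLOAT_TYPE)
                     ? CV_MAKETYPE(CV_32F, slMat.channels)
                     : CV_MAKETYPE(CV_8U, slMat.channels);
    return cv::Mat(slMat.height, slMat.width, cvType, slMat.data);
}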
Example #10
RotatedRect searchFace(Mat& src, GenericModel *model, cv::Size2f scaleFactor, bool draw){
	
	GenericFeature *minFeature = NULL;
	Mat auxImg, auxImg2;
	resize(src, auxImg,cv::Size2i(scaleFactor.width*src.size().width, scaleFactor.height*src.size().height));
	auxImg2 = auxImg.clone();
	
	CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME, 0, 0, 0);
    CvMemStorage* storage = cvCreateMemStorage(0);
    assert (storage);
	if (! cascade)
        abort ();
	
	CvHaarClassifierCascade* cascadeProfile = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME_PROFILE, 0, 0, 0);
    CvMemStorage* storageProfile = cvCreateMemStorage(0);
    assert (storageProfile);
	if (! cascadeProfile)
        abort ();
	
	IplImage *gray_image = cvCreateImage(src.size(), IPL_DEPTH_8U, 1);
	IplImage aux = IplImage(src);
	
	cvCvtColor (&aux, gray_image, CV_BGR2GRAY);
	cvEqualizeHist( gray_image, gray_image );
	
	CvSeq* faces = cvHaarDetectObjects (gray_image, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (25, 25));
	CvSeq* facesProfiles = cvHaarDetectObjects (gray_image, cascadeProfile, storageProfile, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (25, 25));
	
	double minValue = 10000.0;
	RotatedRect minRect;
	
	model->updateModel(auxImg);
	if (draw) cvNamedWindow("ROI");
	
	for (int i = 0; i < (faces ? faces->total : 0); i++){
		CvRect* r = (CvRect*) cvGetSeqElem (faces, i);
		RotatedRect auxRect(Point2i(r->x+r->width/2,r->y+r->height/2),Size2i(r->width,r->height),0);
		auxRect = scaleRect(auxRect, cv::Size2f(scaleFactor.width, scaleFactor.height));
		if (draw) drawRotatedRect(auxImg2, auxRect,CV_RGB(100,50,50) , 2);
		
		
		if(model->ModelType == COV_FULL_IMAGE){
			//minFeature = (GenericFeature *)new CovarianceFullDescriptor(auxRect,model->tracker_param);
			CV_Assert(false);
		}
		else if(model->ModelType == COV_SUB_WINDOWS)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		else if(model->ModelType == COV_SUB_WINDOWS_B)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		
		minFeature->computeFeature(model);
		double dist = model->distance(minFeature);
		
		if (dist<minValue) {
			minValue = dist;
			minRect = auxRect;
		}
		
		minFeature->clear();
		delete minFeature;
		if (draw){
			cout << "dist: "<<dist<<endl;
			imshow( "ROI", auxImg2);
			cvWaitKey();
		}
		
	}
	
	for (int i = 0; i < (facesProfiles ? facesProfiles->total : 0); i++){
		CvRect* r = (CvRect*) cvGetSeqElem (facesProfiles, i);
		RotatedRect auxRect(Point2i(r->x+r->width/2,r->y+r->height/2),Size2i(r->width,r->height),0);
		auxRect = scaleRect(auxRect, cv::Size2f(scaleFactor.width, scaleFactor.height));
		if (draw) drawRotatedRect(auxImg2, auxRect,CV_RGB(0,0,0) , 2);
		
		if(model->ModelType == COV_FULL_IMAGE){
			//minFeature = (GenericFeature *)new CovarianceFullDescriptor(auxRect,model->tracker_param);
			CV_Assert(false);
		}
		else if(model->ModelType == COV_SUB_WINDOWS)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		else if(model->ModelType == COV_SUB_WINDOWS_B)
			minFeature = (GenericFeature *)new CovariancePatchDescriptor(auxRect,model->tracker_param);
		
		minFeature->computeFeature(model);
		double dist = model->distance(minFeature);
		
		
		if (dist<minValue) {
			minValue = dist;
			minRect = auxRect;
		}
		
		minFeature->clear();
		delete minFeature;
		if (draw){
			cout << "dist: "<<dist<<endl;
			imshow( "ROI", auxImg2);
			cvWaitKey();
		}	
	}	
	
	
	if (draw){
		drawRotatedRect(auxImg2, minRect,CV_RGB(255,0,0) , 3);	
		imshow( "ROI", auxImg2);
		cvWaitKey();
		cvDestroyWindow("ROI");
	}
	auxImg2.release();
	auxImg.release();
	
	cvReleaseImage(&gray_image);
	
	cvReleaseMemStorage(&storage);
	cvReleaseMemStorage(&storageProfile);
	cvReleaseHaarClassifierCascade(&cascade);
	cvReleaseHaarClassifierCascade(&cascadeProfile);
	
	return scaleRect(minRect, cv::Size2f(1/scaleFactor.width, 1/scaleFactor.height));	
}
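scaleRect is referenced but not shown; given how it is used (mapping detections between the resized and original images), it presumably scales a RotatedRect's center and size by per-axis factors. A sketch under that assumption:

RotatedRect scaleRect(const RotatedRect &r, cv::Size2f f)
{
	return RotatedRect(Point2f(r.center.x * f.width, r.center.y * f.height),
	                   Size2f(r.size.width * f.width, r.size.height * f.height),
	                   r.angle);
}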
Example #11
void imgproc(const uint8_t *image, int width, int height)
{
  cv::Mat img(height, width, CV_8UC1, const_cast<uint8_t*>(image), width);
  imshow("Original", img);
  cv::waitKey(1);
  // NOTE: this early return short-circuits the whole pipeline below; it looks
  // like a debugging leftover. Remove it to run the circle detection.
  return;

  cv::Mat src = img.clone();
  cv::Mat color_src(height, width, CV_8UC3);
  cvtColor(src, color_src, CV_GRAY2RGB);

  // Image processing starts here
  GaussianBlur(src, src, cv::Size(3,3), 0);
  adaptiveThreshold(src, src, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 5, 3);
  //equalizeHist(src, src);
  // TODO: Can think about using multiple thresholds and choosing one where
  // we can detect a pattern
  //threshold(src, src, 100, 255, cv::THRESH_BINARY_INV);

  imshow("Thresholded", src);

  std::vector<std::vector<cv::Point> > contours;

  findContours(src, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
  //printf("Num contours: %lu\n", contours.size());

  std::vector<double> contour_area, contour_arclength;
  contour_area.resize(contours.size());
  contour_arclength.resize(contours.size());
  std::vector<unsigned int> circle_index;
  for(unsigned int idx = 0; idx < contours.size(); idx++)
  {
    if(contours[idx].size() > 25)
    {
      cv::Mat contour(contours[idx]);
      contour_area[idx] = contourArea(contour);
      if(contour_area[idx] > 50)
      {
        contour_arclength[idx] = arcLength(contour,true);
        float q = 4*M_PI*contour_area[idx] /
            (contour_arclength[idx]*contour_arclength[idx]);
        if(q > 0.8f)
        {
          circle_index.push_back(idx);
          //printf("isoperimetric quotient: %f\n", q);
          //Scalar color( rand()&255, rand()&255, rand()&255 );
          //drawContours(contours_dark, contours, idx, color, 1, 8);
        }
      }
    }
  }
  std::list<Circle> circles;
  for(unsigned int i = 0; i < circle_index.size(); i++)
  {
    Circle c;
    cv::Moments moment = moments(contours[circle_index[i]]);
    float inv_m00 = 1./moment.m00;
    c.center = cv::Point2f(moment.m10*inv_m00, moment.m01*inv_m00);
    c.radius = (sqrtf(contour_area[circle_index[i]]/M_PI) + contour_arclength[circle_index[i]]/(2*M_PI))/2.0f;
    circles.push_back(c);
  }

  // Get the circles with centers close to each other
  std::vector<std::list<Circle> > filtered_circles;
  std::list<Circle>::iterator it = circles.begin();
  unsigned int max_length = 0;
  while(it != circles.end())
  {
    std::list<Circle> c;
    c.push_back(*it);

    cv::Point c1 = it->center;

    std::list<Circle>::iterator it2 = it;
    it2++;
    while(it2 != circles.end())
    {
      cv::Point c2 = it2->center;
      std::list<Circle>::iterator it3 = it2;
      it2++;
      if(hypotf(c2.x - c1.x, c2.y - c1.y) < 10)
      {
        c.push_back(*it3);
        circles.erase(it3);
      }
    }
    unsigned int length_c = c.size();
    if(length_c > 1 && length_c > max_length)
    {
      max_length = length_c;
      filtered_circles.push_back(c);
    }

    it2 = it;
    it++;
    circles.erase(it2);
  }

  if(filtered_circles.size() > 0)
  {
    Circle target_circle;
    target_circle.radius = std::numeric_limits<float>::max();

    for(it = filtered_circles.back().begin(); it != filtered_circles.back().end(); it++)
    {
      //printf("circle: c: %f, %f, r: %f\n", it->center.x, it->center.y, it->radius);
      if(it->radius < target_circle.radius)
      {
        target_circle.radius = it->radius;
        target_circle.center = it->center;
      }
    }
    circle(color_src, cv::Point(target_circle.center.x, target_circle.center.y), target_circle.radius, cv::Scalar(0,0,255), 2);
    printf("target: c: %f, %f, r: %f\n", target_circle.center.x, target_circle.center.y, target_circle.radius);

  }
#if defined(CAPTURE_VIDEO)
  static cv::VideoWriter video_writer("output.avi", CV_FOURCC('M','J','P','G'), 20, cv::Size(width, height)); // MJPG pairs reliably with the AVI container
  video_writer.write(color_src);
#endif
  imshow("Target", color_src);
  cv::waitKey(1);
}
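The q > 0.8f test above is the isoperimetric quotient q = 4*pi*A/P^2, which equals 1 for a perfect circle and is strictly smaller for any other shape. A standalone check of why 0.8 separates circles from, say, squares:

#include <cmath>
#include <cstdio>

int main()
{
    // circle of radius 1: A = pi, P = 2*pi -> q = 1.000
    // square of side 1:   A = 1,  P = 4    -> q = pi/4, about 0.785 (rejected)
    auto q = [](double area, double perim) { return 4.0 * M_PI * area / (perim * perim); };
    printf("circle: q = %f\n", q(M_PI, 2.0 * M_PI));
    printf("square: q = %f\n", q(1.0, 4.0));
    return 0;
}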
Example #12
bool searchFace(const Mat& src, RotatedRect rect){
	
	
	CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME, 0, 0, 0);
    CvMemStorage* storage = cvCreateMemStorage(0);
    assert (storage);
	if (! cascade)
        abort ();
	
	CvHaarClassifierCascade* cascadeProfile = (CvHaarClassifierCascade*) cvLoad (CASCADE_NAME_PROFILE, 0, 0, 0);
    CvMemStorage* storageProfile = cvCreateMemStorage(0);
    assert (storageProfile);
	if (! cascadeProfile)
        abort ();
	
	IplImage *gray_image = cvCreateImage(src.size(), IPL_DEPTH_8U, 1);
	IplImage aux = IplImage(src);
	
	rect.size.width *= 1.5;
	rect.size.height *= 1.5;
	
	cvCvtColor (&aux, gray_image, CV_BGR2GRAY);
	cvEqualizeHist( gray_image, gray_image );
	cvSetImageROI(gray_image, getBoundingRect(rect));
	
	CvSeq* faces = cvHaarDetectObjects (gray_image, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (10, 10));
	CvSeq* facesProfiles = cvHaarDetectObjects (gray_image, cascadeProfile, storageProfile, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize (10, 10));
	
	for (int i = 0; i < (faces ? faces->total : 0); i++){
		CvRect* r = (CvRect*) cvGetSeqElem (faces, i);
		
		CvPoint center;
		int radius;
		center.x = cvRound((r->width*0.5 + r->x));
		center.y = cvRound((r->y + r->height*0.5));
		radius = cvRound((r->width + r->height)*0.25);
		cvCircle (gray_image, center, radius, CV_RGB(0,255,0), 3, 8, 0 );

	}
	
	for (int i = 0; i < (facesProfiles ? facesProfiles->total : 0); i++){
		CvRect* r = (CvRect*) cvGetSeqElem (facesProfiles, i);
		CvPoint center;
		int radius;
		center.x = cvRound((r->width*0.5 + r->x));
		center.y = cvRound((r->y + r->height*0.5));
		radius = cvRound((r->width + r->height)*0.25);
		cvCircle (gray_image, center, radius, CV_RGB(0,255,0), 3, 2, 0 );
	}
	
	
	cvNamedWindow("ROI");
	imshow( "ROI", gray_image);
	//cvWaitKey();
	
	cvResetImageROI(gray_image);
	cvReleaseImage(&gray_image);
	
	cvClearMemStorage(storage);
	cvClearMemStorage(storageProfile);
	
	return (faces && faces->total > 0) || (facesProfiles && facesProfiles->total > 0);
}
/**
 * @brief Processes a search request.
 * @param request the request to process.
 */
u_int32_t ORBSearcher::searchImage(SearchRequest &request)
{
    timeval t[5];
    gettimeofday(&t[0], NULL);

    cout << "Loading the image and extracting the ORBs." << endl;

    Mat img;
    u_int32_t i_ret = ImageLoader::loadImage(request.imageData.size(),
                                             request.imageData.data(), img);
    if (i_ret != OK)
        return i_ret;

    vector<KeyPoint> keypoints;
    Mat descriptors;

    ORB(1000, 1.02, 100)(img, noArray(), keypoints, descriptors);

    gettimeofday(&t[1], NULL);

    cout << "time: " << getTimeDiff(t[0], t[1]) << " ms." << endl;
    cout << "Looking for the visual words. " << endl;

    unordered_map<u_int32_t, list<Hit> > imageReqHits; // key: visual word, value: the found angles
    for (unsigned i = 0; i < keypoints.size(); ++i)
    {
        #define NB_NEIGHBORS 1

        vector<int> indices(NB_NEIGHBORS);
        vector<int> dists(NB_NEIGHBORS);
        wordIndex->knnSearch(descriptors.row(i), indices,
                           dists, NB_NEIGHBORS);

        for (unsigned j = 0; j < indices.size(); ++j)
        {
            const unsigned i_wordId = indices[j];
            if (imageReqHits.find(i_wordId) == imageReqHits.end())
            {
                // Convert the angle to a 16 bit integer.
                Hit hit;
                hit.i_imageId = 0;
                hit.i_angle = keypoints[i].angle / 360 * (1 << 16);
                hit.x = keypoints[i].pt.x;
                hit.y = keypoints[i].pt.y;

                imageReqHits[i_wordId].push_back(hit);
            }
        }
    }

    cout << imageReqHits.size() << " visual words kept for the request." << endl;

    const unsigned i_nbTotalIndexedImages = index->getTotalNbIndexedImages();
    cout << i_nbTotalIndexedImages << " images indexed in the index." << endl;

    unordered_map<u_int32_t, vector<Hit> > indexHits; // key: visual word id, values: index hits.
    index->getImagesWithVisualWords(imageReqHits, indexHits);

    gettimeofday(&t[2], NULL);
    cout << "time: " << getTimeDiff(t[1], t[2]) << " ms." << endl;
    cout << "Ranking the images." << endl;

    unordered_map<u_int32_t, float> weights; // key: image id, value: image score.

    for (unordered_map<u_int32_t, vector<Hit> >::const_iterator it = indexHits.begin();
        it != indexHits.end(); ++it)
    {
        const vector<Hit> &hits = it->second;

        const float f_weight = log((float)i_nbTotalIndexedImages / hits.size());

        for (vector<Hit>::const_iterator it2 = hits.begin();
             it2 != hits.end(); ++it2)
        {
            /* TF-IDF according to the paper "Video Google:
             * A Text Retrieval Approach to Object Matching in Videos" */
            unsigned i_totalNbWords = index->countTotalNbWord(it2->i_imageId);
            weights[it2->i_imageId] += f_weight / i_totalNbWords;
        }
    }

    priority_queue<SearchResult> rankedResults;
    for (unordered_map<u_int32_t, float>::const_iterator it = weights.begin();
         it != weights.end(); ++it)
        rankedResults.push(SearchResult(it->second, it->first));

    gettimeofday(&t[3], NULL);
    cout << "time: " << getTimeDiff(t[2], t[3]) << " ms." << endl;
    cout << "Reranking 300 among " << rankedResults.size() << " images." << endl;

    priority_queue<SearchResult> rerankedResults;
    reranker.rerank(imageReqHits, indexHits,
                    rankedResults, rerankedResults, 300);

    gettimeofday(&t[4], NULL);
    cout << "time: " << getTimeDiff(t[3], t[4]) << " ms." << endl;
    cout << "Returning the results. " << endl;

    returnResults(rerankedResults, request, 100);

#if 0
    // Draw keypoints and ellipses.
    Mat img_res;
    drawKeypoints(img, cleanKeypoints, img_res, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    for (unsigned i = 0; i < ellipses.size(); ++i)
        ellipse( img_res, ellipses[i], Scalar(0, 0, 255), 1);

    // Show the image.
    imshow("Keypoints 1", img_res);
#endif

    return SEARCH_RESULTS;
}
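For reference, the weight accumulated in the ranking loop above is the TF-IDF scheme from the cited Video Google paper: a matched visual word contributes its inverse document frequency, normalized by the candidate image's total word count. Isolated as a helper:

#include <cmath>

// Contribution of one matched visual word to one candidate image's score.
float tfIdfContribution(unsigned nbTotalImages, unsigned nbImagesWithWord,
                        unsigned nbWordsInImage)
{
    const float idf = log((float)nbTotalImages / nbImagesWithWord); // rare words score higher
    return idf / nbWordsInImage;                                    // normalize by image size
}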
Example #14
void camera_contours_display(int num, Straightener & straight) {
		int c;
		IplImage* color_img;
		CvCapture* cv_cap = cvCaptureFromCAM(num);
		cvNamedWindow("Video", 0); // create window
		resizeWindow("Video", 700,700);
		for(;;) {
			color_img = cvQueryFrame(cv_cap); // get frame
			if(color_img != 0) {
				Mat cam_mat(color_img);
				Mat result;
				cam_mat.copyTo(result);

				if(straight.doAll(cam_mat, result)) {
					///Apply blur
					blur(result, result, Size(3,3));
					///Apply Canny to destination Matrix
					Canny(result, result, 50, 50, 3);
					/// Vectors for storing contours
					vector<vector<Point> > contours; //contours of the paper sheet
					vector<vector<Point> > approx_contours; //approx contours of the paper sheet
					vector<Vec4i> hierarchy;
					int erosion_type = 2;
					int erosion_size = 3;
					Mat element = getStructuringElement(erosion_type,
														Size( 2*erosion_size + 1, 2*erosion_size+1),
														Point( erosion_size, erosion_size));
					dilate(result, result, element);
					/// Cut 10 px from each side to avoid paper border detection
					result = result(Rect(10, 10, result.cols-20, result.rows-20));
					findContours(result, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point(0, 0));
					/// Draw contours
					Mat drawing = Mat::zeros( result.size(), CV_8UC3 );
					/// https://github.com/Itseez/opencv/blob/master/samples/cpp/contours2.cpp
//					approx_contours.resize(contours.size());
					for(unsigned int i = 0; i < contours.size(); i++) {
						/// Area of more than 20 and no parent
						if(contourArea(contours[i]) > 20 && hierarchy[i][3] == -1) {
							vector<Point> tmp_contour;
							approxPolyDP(Mat(contours[i]), tmp_contour, 3, true);
							approx_contours.push_back(tmp_contour);
						}
					}
					for(unsigned int i=0; i < approx_contours.size(); i++) {
						Scalar color;
						if(approx_contours[i].size() == 4) {
							color = Scalar( 255, 255, 255);
							drawContours( drawing, approx_contours, i, color, 1, 8, noArray(), 0, Point() );
						}
						else {
							color = Scalar( 0, 255, 0);
							drawContours( drawing, approx_contours, i, color, 1, 8, noArray(), 0, Point() );
						}
					}
					imshow("Video", drawing);
				}
			}
			c = cvWaitKey(10); // wait 10 ms or for key stroke
			if(c == 27)
				break; // if ESC, break and quit
		}
		/* clean up */
		cvReleaseCapture( &cv_cap );
		cvDestroyWindow("Video");
}
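The loop above keeps approxPolyDP outputs with exactly four vertices as sheet candidates; adding a convexity test tightens that filter, since noisy contours can also approximate to four points. A small sketch of the combined rule (the convexity requirement is an addition, not taken from the project):

bool looksLikeSheetCandidate(const vector<Point> &contour)
{
	vector<Point> approx;
	approxPolyDP(Mat(contour), approx, 3, true);
	return approx.size() == 4 && isContourConvex(approx);
}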
int main_track(){

	//some boolean variables for added functionality
	bool objectDetected = false;
	//these two can be toggled by pressing 'd' or 't'
	bool debugMode = false;
	bool trackingEnabled = false;
	//pause and resume code
	bool pause = false;
	//set up the matrices that we will need
	//the two frames we will be comparing
	Mat frame1,frame2;
	//their grayscale images (needed for absdiff() function)
	Mat grayImage1,grayImage2;
	//resulting difference image
	Mat differenceImage;
	//thresholded difference image (for use in findContours() function)
	Mat thresholdImage;
	//video capture object.
	VideoCapture capture;
	capture.open(0);	
	if(!capture.isOpened()){
			cout<<"ERROR ACQUIRING VIDEO FEED\n";
			getchar();
			return -1;
		}
	while(1){

		//we can loop the video by re-opening the capture every time the video reaches its last frame

		//capture.open("C:\\Users\\Ran_the_User\\Documents\\GitHub\\3Drobot\\camera3D\\src\\externals\\bouncingBall.avi");
	
	

		//check if the video has reach its last frame.
		//we add '-1' because we are reading two frames from the video at a time.
		//if this is not included, we get a memory error!
		//while(capture.get(CV_CAP_PROP_POS_FRAMES)<capture.get(CV_CAP_PROP_FRAME_COUNT)-1)
		{

			//read first frame
			capture.read(frame1);
			//convert frame1 to gray scale for frame differencing
			cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
			//read the second frame
			capture.read(frame2);
			//convert frame2 to gray scale for frame differencing
			cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
			//perform frame differencing with the sequential images. This will output an "intensity image"
			//do not confuse this with a threshold image, we will need to perform thresholding afterwards.
			cv::absdiff(grayImage1,grayImage2,differenceImage);
			//threshold intensity image at a given sensitivity value
			cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
			if(debugMode==true){
				//show the difference image and threshold image
				cv::imshow("Difference Image",differenceImage);
				cv::imshow("Threshold Image", thresholdImage);
			}else{
				//if not in debug mode, destroy the windows so we don't see them anymore
				cv::destroyWindow("Difference Image");
				cv::destroyWindow("Threshold Image");
			}
			//blur the image to get rid of the noise. This will output an intensity image
			cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
			//threshold again to obtain binary image from blur output
			cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
			if(debugMode==true){
				//show the threshold image after it's been "blurred"

				imshow("Final Threshold Image",thresholdImage);

			}
			else {
				//if not in debug mode, destroy the windows so we don't see them anymore
				cv::destroyWindow("Final Threshold Image");
			}

			//if tracking enabled, search for contours in our thresholded image
			if(trackingEnabled){

				searchForMovement(thresholdImage,frame1);
			}

			//show our captured frame
			imshow("Frame1",frame1);
			//check to see if a button has been pressed.
			//this 10ms delay is necessary for proper operation of this program;
			//if removed, frames will not have enough time to refresh and a blank
			//image will appear.
			switch(waitKey(10)){

			case 27: //'esc' key has been pressed, exit program.
				return 0;
			case 116: //'t' has been pressed. this will toggle tracking
				trackingEnabled = !trackingEnabled;
				if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
				else cout<<"Tracking enabled."<<endl;
				break;
			case 100: //'d' has been pressed. this will toggle debug mode
				debugMode = !debugMode;
				if(debugMode == false) cout<<"Debug mode disabled."<<endl;
				else cout<<"Debug mode enabled."<<endl;
				break;
			case 112: //'p' has been pressed. this will pause/resume the code.
				pause = !pause;
				if(pause == true){
					cout<<"Code paused, press 'p' again to resume"<<endl;
					while (pause == true){
						//stay in this loop until 'p' is pressed again
						switch (waitKey()){
							//a switch statement inside a switch statement? Mind blown.
						case 112:
							//change pause back to false
							pause = false;
							cout<<"Code Resumed"<<endl;
							break;
						}
					}
				}


			}


		}
		//release the capture before re-opening and looping again.
		//capture.release();
	}

	return 0;

}
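The loop above delegates the actual tracking to searchForMovement, which is not included in this listing. A minimal sketch of what such a helper could look like, assuming the common approach of taking the bounding rectangle of the largest contour in the thresholded image (the structure and drawing style are assumptions, not the original implementation):

void searchForMovement(cv::Mat thresholdImage, cv::Mat &cameraFeed){
	//findContours modifies its input, so work on a copy
	cv::Mat temp;
	thresholdImage.copyTo(temp);
	std::vector< std::vector<cv::Point> > contours;
	std::vector<cv::Vec4i> hierarchy;
	cv::findContours(temp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	if(contours.empty()) return;
	//assume the largest contour is the moving object
	size_t largest = 0;
	for(size_t i = 1; i < contours.size(); i++)
		if(cv::contourArea(contours[i]) > cv::contourArea(contours[largest]))
			largest = i;
	cv::Rect box = cv::boundingRect(contours[largest]);
	cv::Point center(box.x + box.width/2, box.y + box.height/2);
	//mark the detected position on the frame that will be displayed
	cv::circle(cameraFeed, center, 20, cv::Scalar(0, 255, 0), 2);
}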
void pintaI(string im) {
    namedWindow("pinta Imagen", WINDOW_AUTOSIZE);
    imshow("pinta Imagen", leeimagen(im, -1));
    waitKey(0);
    destroyWindow("pinta Imagen");
}
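pintaI relies on a leeimagen helper ("read image" in Spanish) that is not part of this excerpt. A plausible sketch, assuming it simply wraps cv::imread with the given flag (-1, i.e. load the image unchanged):

Mat leeimagen(string ruta, int flagColor) {
    //load the image with the requested flag; -1 keeps depth and channels unchanged
    Mat img = imread(ruta, flagColor);
    if (img.empty())
        cout << "Could not read image: " << ruta << endl;
    return img;
}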
Example #17
0
Mat  Kohonen::getFrame(Mat parFrame)
{
    int map_dim=2;//dimensionality of the map
    unsigned N[map_dim];
    unsigned u, v, q, index_winner;
    N[0] = 7;//number of neurons along each dimension
    N[1] = 7;

    //define a constant so that each class is displayed in a distinct color
    uint32_t A=1;
    for (int i=0;i<map_dim; i++)
    {
        A*=N[i];//total number of neurons
    }
    A=0xFFFFFF/A;

    fmll_som * som;
    Mat resultFrame;
    resultFrame.create(parFrame.rows, parFrame.cols,CV_8UC3);
    Mat frameFromUnsigned;
    frameFromUnsigned.create(parFrame.rows, parFrame.cols,CV_32S);

    Vec3b pixel;
    double ** vec;
    //initialize the vector space with the image's intensity values
    vec = (double **) fmll_alloc(sizeof(double), 2, parFrame.rows * parFrame.cols, 3);
    som = fmll_som_load(somFilename.c_str(),& fmll_distance_euclid, & fmll_distance_euclid);
    for(v = 0, q = 0; v < parFrame.rows; v++)
    for(u = 0; u < parFrame.cols; u++, q++)
    {
        pixel=parFrame.at<Vec3b>(v,u);

        vec[q][0] = pixel[0] / 255.;
        vec[q][1] = pixel[1] / 255.;
        vec[q][2] = pixel[2] / 255.;
    }
    //if the map could not be loaded from XML above, train a new one
    if(som == NULL)
    {
        fmll_random * rnd;
        double param[2];
        param[0] = 0;
        param[1] = 0.01;
        //initialize the random number generator
        rnd = fmll_random_init(FMLL_RANDOM_ALGORITHM_LCG, FMLL_RANDOM_DISTRIBUTION_UNIFORM, param, time(NULL));
        //initialize the neural map
        som = fmll_som_init(N, map_dim, 3, & fmll_distance_euclid, & fmll_distance_euclid);//(1) distance between vectors, (2) distance between neurons; (2) is unused because the WTA algorithm is applied
        //initialize the synapse weights of the map's neurons with random numbers
        fmll_som_weight_init_random(som, rnd);

        cout<<"don't load"<<endl;

            //train the neural map
            fmll_som_so_kohonen(som, (const double **) vec, parFrame.rows * parFrame.cols,
                                        0.3, & fmll_timing_next_beta_step_plus_0_1, 0.8, 0.002, & fmll_som_neighbor_wta);
            if (fmll_som_save(som,somFilename.c_str())!=0)
            {
                cout<<"Ошибка сохранения Som в Xml"<<endl;
            }
            fmll_random_destroy(rnd);
   }
    for(v = 0, q = 0; v < parFrame.rows; v++)
    {
        for(u = 0; u < parFrame.cols; u++, q++)
        {
            index_winner = fmll_som_run(som, vec[q]);
            uint32_t color=A*index_winner;
            pixel[0]=0x0000FF&color;
            pixel[1]=(0x00FF00&color)>>8;
            pixel[2]=(0xFF0000&color)>>16;
            //pixel[0] = som->w[index_winner][0];
            //pixel[1] = som->w[index_winner][1];
            //pixel[2] = som->w[index_winner][2];
            resultFrame.at<Vec3b>(v,u)=pixel;
            frameFromUnsigned.at<uint32_t>(v,u)=color;
            //cout<<color<<" ";
        }
        //cout<<endl;
    }
    fmll_free(vec);
    imshow( "kohonen", resultFrame );
    fmll_som_destroy(som);
    return frameFromUnsigned;
}
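Since each class is encoded as color = A * index_winner, the class index can be recovered from the returned CV_32S frame by integer division. A small sketch of such a decoder (the helper name is an assumption; A must be the same constant computed above, 0xFFFFFF divided by the total number of neurons):

Mat decodeClassIndices(const Mat &labelFrame, uint32_t A)
{
    Mat indices(labelFrame.rows, labelFrame.cols, CV_32S);
    for (int v = 0; v < labelFrame.rows; v++)
        for (int u = 0; u < labelFrame.cols; u++)
            //invert color = A * index_winner
            indices.at<uint32_t>(v, u) = labelFrame.at<uint32_t>(v, u) / A;
    return indices;
}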
cv::Mat PatchMatchDescriptor::apply(cv::Mat source, cv::Mat target, int iterations, int patchSize) {
	assert(iterations > 0 && "Iterations must be a strictly positive integer\n");
	assert(patchSize >= 3 && (patchSize & 1) && "Patch size must be at least 3 and odd\n");

	// convert patch diameter to patch radius
	//patchSize = patchSize/2+0.5;

	// Create the two descriptors
	// (the descriptor computations happen at this point)
	std::cout << "patchSize : " << patchSize << std::endl;
	std::cout << "1" << std::endl;
	Descriptor desSource(source, patchSize/2);
	std::cout << "1.5" << std::endl;
	Descriptor desTarget(target, patchSize/2);
	std::cout << "2" << std::endl;

	cv::Mat out = cv::Mat::zeros(target.rows, target.cols,CV_8UC3);



	ImageCorrespondance corres(target.cols, target.rows);

	// INITIALIZATION - uniform random assignment
	for(int x=patchSize/2; x < out.cols-patchSize/2; x++) {
		for(int y=patchSize/2; y < out.rows-patchSize/2; y++) {
			int dx = rand() % (source.cols - 1);
			int dy = rand() % (source.rows - 1);

			Vec * vector = &corres.vectors[x][y];
			vector->x = dx;
			vector->y = dy;
			vector->dist = distanceDescriptor(desSource, desTarget, dx, dy, x, y);

			if (vector->dist == HUGE_VAL) {vector->x = 0; vector->y = 0; dx=dy=0;}
			attributePixels(out, x, y, source, dx, dy);
		}
	}

	bool forwardSearch = true;

	Vec * outPtr;
	for (int i=0; i < iterations; i++) {
		std::cout << "Iteration n " << i+1 << " / " << iterations << std::endl;

		// PROPAGATION
		int nbChange = 0;
		if(forwardSearch) {
			// Forward propagation - compare left, center and up
			for(int x = patchSize/2; x < out.cols-patchSize/2; x++) {
				for(int y = patchSize/2; y < out.rows-patchSize/2; y++) {
					outPtr = &corres.vectors[x][y];

					Vec * left = &corres.vectors[x-1][y];

					double distLeft = distanceDescriptor(desSource, desTarget, left->x, left->y, x, y);

					if (distLeft < outPtr->dist) {
						outPtr->x = left->x;
						outPtr->y = left->y;
						outPtr->dist = distLeft;

						attributePixels(out, x, y, source, outPtr->x, outPtr->y);

						nbChange++;
					}

					Vec * up = &corres.vectors[x][y-1];
					double distUp = distanceDescriptor(desSource, desTarget, up->x, up->y, x, y);

					if (distUp < outPtr->dist) {
						outPtr->x = up->x;
						outPtr->y = up->y;
						outPtr->dist = distUp;

						attributePixels(out, x, y, source, outPtr->x, outPtr->y);

						nbChange++;
					}
				}

				imshow("patch match", out);
				cv::waitKey(1);
				
			}


			std::cout<<nbChange<<std::endl;
		}
		else {
			// Backward propagation - compare right, center and down
			for(int x = out.cols-patchSize/2; x >= patchSize/2; x--) {
				for(int y = out.rows-patchSize/2; y >= patchSize/2; y--) {	
					outPtr = &corres.vectors[x][y];

					Vec * right = &corres.vectors[x+1][y];

					double distRight = distanceDescriptor(desSource, desTarget, right->x, right->y, x, y);

					if (distRight < outPtr->dist) {
						outPtr->x = right->x;
						outPtr->y = right->y;
						outPtr->dist = distRight;

						attributePixels(out, x, y, source, outPtr->x, outPtr->y);

						nbChange++;
					}

					Vec * down = &corres.vectors[x][y+1];
					double distDown = distanceDescriptor(desSource, desTarget, down->x, down->y, x, y);

					if (distDown < outPtr->dist) {
						outPtr->x = down->x;
						outPtr->y = down->y;
						outPtr->dist = distDown;

						attributePixels(out, x, y, source, outPtr->x, outPtr->y);

						nbChange++;
					}
				}

				imshow("patch match", out);
				cv::waitKey(1);
			}

			std::cout<<nbChange<<std::endl;
		}

		forwardSearch = !forwardSearch;

		for(int x = patchSize/2; x < target.cols-patchSize/2; x++) {
			for(int y = patchSize/2; y < target.rows-patchSize/2; y++) {

				int radius = source.rows > source.cols ? source.rows : source.cols;

				// search an exponentially smaller window each iteration
				while (radius > 1) {
					// Search around current offset vector (distance-weighted)

					// clamp the search window to the image
					outPtr = &corres.vectors[x][y];
					int minX = (int)outPtr->x - radius;
					int maxX = ((int)(outPtr->x + radius)) + 1;
					int minY = (int)outPtr->y - radius;
					int maxY = ((int)(outPtr->y + radius)) + 1;
					if (minX < 0) minX = patchSize/2;
					if (maxX > source.cols) maxX = source.cols-patchSize/2;
					if (minY < 0) minY = patchSize/2;
					if (maxY > source.rows) maxY = source.rows-patchSize/2;

					int randX = rand() % (maxX - minX) + minX;
					int randY = rand() % (maxY - minY) + minY;

					Vec * random = &corres.vectors[randX][randY];
					double dist = distanceDescriptor(desSource, desTarget, random->x, random->y, x, y);
					if (dist < outPtr->dist) {
						outPtr->x = random->x;
						outPtr->y = random->y;
						outPtr->dist = dist;

						attributePixels(out, x, y, source, outPtr->x, outPtr->y);
					}

					radius >>= 1;
				}
			}

			//imshow("patch match", out);
			//cv::waitKey(1);
		}
	}
	cv::waitKey();
	return out;
}
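apply depends on distanceDescriptor, which is defined elsewhere. Under the usual PatchMatch formulation this is the sum of squared differences between the patch around the target pixel and the patch around the candidate source pixel; a hedged sketch that works directly on the images rather than on the Descriptor class (which is not shown here):

// Hypothetical SSD patch distance; assumes 8-bit 3-channel images and that
// both patches lie fully inside their images.
double patchSSD(const cv::Mat &source, const cv::Mat &target,
                int sx, int sy, int tx, int ty, int radius) {
	double ssd = 0;
	for (int dy = -radius; dy <= radius; dy++) {
		for (int dx = -radius; dx <= radius; dx++) {
			cv::Vec3b s = source.at<cv::Vec3b>(sy + dy, sx + dx);
			cv::Vec3b t = target.at<cv::Vec3b>(ty + dy, tx + dx);
			for (int c = 0; c < 3; c++) {
				double d = double(s[c]) - double(t[c]);
				ssd += d * d;
			}
		}
	}
	return ssd;
}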
Example #19
0
// display function should be good enough
void OpenRadar::DrawRadarData()
{
	int usualColor[15] = {16777215,255,128,65280,32768,
		      16711680,16711935,8421376,65535,32896 }; /* palette of common colors */
	CvPoint pt1, pt2;

	cvZero(RadarImage);
	cvCircle(RadarImage, cvPoint(DisplayDx,DisplayDy),3, CV_RGB(0,255,255), -1, 8,0);
	int x,y;
	unsigned char * pPixel = 0;
	int colorIndex = 0, colorRGB;
	int R = 255, G = 0, B = 0;
    
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] < 0)
		{
			
			//change color
			colorRGB = usualColor[colorIndex];
			R = colorRGB/65536;
			G = (colorRGB%65536)/256;
			B = colorRGB%256;
			colorIndex = (colorIndex + 1)%10;
			
		}
		else 
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
	
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				pPixel = (unsigned char*)RadarImage->imageData + y*RadarImage->widthStep + 3*x;
				pPixel[0] = B;
				pPixel[1] = G;
				pPixel[2] = R;
			}
		}     
	}
	
	pt1.x = DisplayDx; pt1.y = DisplayDy;
	pt2.x = DisplayDx+line_length*v_scale*sin(v_angle + 0.5*M_PI); 
	pt2.y = DisplayDy+line_length*v_scale*cos(v_angle + 0.5*M_PI);
	cvLine(RadarImage, pt1, pt2, CV_RGB(255,255,255),2,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 	
	pt2.y = DisplayDy+line_length*sin(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	pt2.y = DisplayDy+line_length*sin(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	//pt2.x = DisplayDx+line_length*cos(0.25*M_PI); 
	//pt2.y = DisplayDy+line_length*sin(0.25*M_PI);
	//cout<< line_length <<endl; 
	//cout<< pt1.x <<" , " << pt1.y <<endl;
	//cout<< pt2.x <<" , " << pt2.y <<endl;
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	float angle;
	int line_length2;
	for (int i=0; i<polarH_length;i++)
	{
		angle = (-30+i*polarH_resolution)*M_PI/180;
		line_length2 = H[i]/10;
		pt2.x = DisplayDx+line_length2*sin(angle); 
		pt2.y = DisplayDy+line_length2*cos(angle);
		cvCircle(RadarImage, pt2, 2, CV_RGB(255,255,255),1,8,0);
	}

	////////////////////////////////////////////////////////////////////////////////////
	// mine
	////////////////////////////////////////////////////////////////////////////////////
	Mat binImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC1);
	vector< Point> centerRaw;
	centerRaw.clear();
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] > 200)
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
			//centerRaw.push_back(Point(x,y));
			//cout<<"P:" <<centerRaw[i].x<<","<<centerRaw[i].y<<endl;
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				 circle( binImg,Point(x,y),1,Scalar(255),-1);
			}
		}     
	}
	imshow("binImg",binImg);
	Mat element = getStructuringElement(MORPH_RECT, Size(1,2));
	Mat element2 = getStructuringElement(MORPH_RECT, Size(10,10));
	erode(binImg, binImg, element);
	morphologyEx(binImg, binImg, MORPH_OPEN, element);
	dilate(binImg, binImg, element2);
	morphologyEx(binImg, binImg, MORPH_CLOSE, element2);
	imshow("dilate",binImg);

	vector< vector<Point> > contours;	
	vector< vector<Point> > filterContours;	
	vector< Vec4i > hierarchy;	
	vector< Point2f> center;
	vector< float > radius;
	vector<Point2f> realPoint;
	

	findContours(binImg, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	center.resize(contours.size());
	radius.resize(contours.size());
	//realPoint.resize(contours.size());
	for(int i = 0; i< contours.size(); i++)
	{
		minEnclosingCircle(Mat(contours[i]),center[i],radius[i]);//find the minimum enclosing circle of each contour
		circle(binImg,center[i],650/DisplayRatio,Scalar(255),1); 
		//cout<<"No."<<i<<" | P: "<< center[i].x<<","<<center[i].y<<endl;
		float realX = (center[i].x - DisplayDx) * DisplayRatio;
		float realY = (center[i].y - DisplayDy) * DisplayRatio;

		realPoint.push_back(Point2f(realX,realY));
		//cout<<"No."<<i<<" | P: "<< realPoint[i].x<<","<<realPoint[i].y<<endl;
	}
	imshow("findContours",binImg);
	// color map
	Mat mapImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC3);
	circle(mapImg, Point(DisplayDx,DisplayDy),3, CV_RGB(255,255,255),-1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx+40,DisplayDy), Scalar(0,0,255),1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx,DisplayDy+40), Scalar(0,255,0),1);
	for(int i = 0; i< center.size(); i++)
	{
		circle(mapImg,center[i],650/DisplayRatio,Scalar(255,255,0),1,CV_AA); 
		circle(mapImg,center[i],100/DisplayRatio,Scalar(0,255,255),-1); 
	}
	imshow("Map",mapImg);
	////////////////////////////////////
	ukftest::laserPoint msg;
	vector <float> xvec;
	vector <float> yvec;
	for(int i = 0 ; i < realPoint.size(); i++)
	{
		// cm
		xvec.push_back(realPoint[i].x/10.0f);
		yvec.push_back(realPoint[i].y/10.0f);
	}

	// msg
	msg.header.stamp = ros::Time::now();
	msg.header.frame_id = "hokuyo_laser";
	msg.x =xvec;
	msg.y =yvec;
	if(realPoint.size() >0) msg.isBlocking = 1;
	else msg.isBlocking = 0;
	pub_xy. publish(msg);
	
}
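The rho/theta-to-pixel mapping appears twice in DrawRadarData; it can be factored into one helper. A sketch (the helper name and signature are assumptions):

// Convert a polar radar sample to image coordinates; returns false when the
// point falls outside the image. Note the minus sign: image y grows downwards.
static bool polarToPixel(double rho, double theta, double displayRatio,
                         int displayDx, int displayDy,
                         int width, int height, int &x, int &y)
{
	x = (int)(rho * cos(theta) / displayRatio) + displayDx;
	y = (int)(-rho * sin(theta) / displayRatio) + displayDy;
	return x >= 0 && x < width && y >= 0 && y < height;
}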
Example #20
0
File: Calib.cpp Project: ALX5/PJS
Calib::Calib()
{
    Size boardSize(6,5); // Chessboard size in inner corners (number of squares - 1 in each direction)
    int widthSquare = 40; // Width of a square in mm
    int heightSquare = 27; // Height of a square in mm
    vector <Mat> images;

    // Getting the four images of the chessboard
    string imageFileName = "../src/mire1.jpg";
    images.push_back(imread(imageFileName, 1));

    imageFileName = "../src/mire2.jpg";
    images.push_back(imread(imageFileName, 1));

    imageFileName = "../src/mire3.jpg";
    images.push_back(imread(imageFileName, 1));

    imageFileName = "../src/mire4.jpg";
    images.push_back(imread(imageFileName, 1));

    Size imageSize = images.at(0).size();

    // Find chessboard's corners in the scene for the 4 images
    vector<vector<Point2f> > cornersScene(1);
    vector<Mat> imagesGray;

    imagesGray.resize(4);

    for (int i=0; i<4; i++)
    {
        if(images.at(i).empty())
        {
            cerr << "Image not read correctly!" << endl;
            exit(-1);
        }

        bool patternFound = findChessboardCorners(images.at(i), boardSize, cornersScene[0]);
        if(!patternFound)
        {
            cerr << "Could not find chess board!" << endl;
            exit(-1);
        }

        // Improve the corners' coordinate accuracy
        cvtColor(images.at(i), imagesGray.at(i), CV_RGB2GRAY);
        cornerSubPix(imagesGray.at(i), cornersScene[0], Size(3,2), Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));

        // Drawing the corners
        drawChessboardCorners(images.at(i), boardSize, Mat(cornersScene[0]), patternFound );

        imshow("Corners find", images.at(i));

        int keyPressed;
        /*do
        {
            keyPressed = waitKey(0);
        } while (keyPressed != 27);*/
    }

    // Getting the chessboard's corners on the calibration target (mire) image
    vector<vector<Point3f> > cornersMire(1);

    for( int y = 0; y < boardSize.height; y++ )
    {
        for( int x = 0; x < boardSize.width; x++ )
        {
            cornersMire[0].push_back(cv::Point3f(float(x*widthSquare),
                                                 float(y*heightSquare), 0));
        }
    }

    // Getting the camera's parameters

    Mat distortionCoefficients = Mat::zeros(8, 1, CV_64F);
    Mat cameraMatrix = Mat::eye(3, 3, CV_64F);

    calibrateCamera(cornersMire, cornersScene, imageSize, cameraMatrix,
                    distortionCoefficients, rotationVectors, translationVectors);


    //cout << "Camera matrix: " << cameraMatrix << endl;
    //cout << "Distortion _coefficients: " << distortionCoefficients << endl;
    cout << rotationVectors.at(0) << endl;
    cout << translationVectors.at(0) << endl;

}
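With cameraMatrix and distortionCoefficients estimated, the typical next step is to correct new images from the same camera. A minimal usage sketch with the standard OpenCV call (the input path is an assumption):

// Undistort an image using the calibration computed above.
Mat raw = imread("../src/mire1.jpg", 1);
Mat rectified;
undistort(raw, rectified, cameraMatrix, distortionCoefficients);
imshow("Undistorted", rectified);
waitKey(0);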
int main(int argc, char* argv[])
{
	//if we would like to calibrate our filter values, set to true.
	bool calibrationMode = true;

	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat HSV;

	if(calibrationMode){
		//create slider bars for HSV filtering
		createTrackbars();
	}
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	waitKey(1000);
	while(1){
		//store image to matrix
		capture.read(cameraFeed);

		src = cameraFeed;

		if( !src.data )
		{ return -1; }

		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);

		if(calibrationMode==true){

		//in calibration mode we track objects based on the HSV slider
		//values, in order to find the appropriate color range values
		//(set calibrationMode to false once these values are known)
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
			morphOps(threshold);
			imshow(windowName2,threshold);

		//the following sets up Canny edge detection
			/// Create a matrix of the same type and size as src (for dst)
	  		dst.create( src.size(), src.type() );
	  		/// Convert the image to grayscale
	  		cvtColor( src, src_gray, CV_BGR2GRAY );
	  		/// Create a window
	  		namedWindow( window_name, CV_WINDOW_AUTOSIZE );
	  		/// Create a Trackbar for user to enter threshold
	  		createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold);
	  		/// Show the image
			trackFilteredObject(threshold,HSV,cameraFeed);
		}
		else{
			//create some temp fruit objects so that
			//we can use their member functions/information
			Object blue("blue"), yellow("yellow"), red("red"), green("green");

			//first find blue objects
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,blue.getHSVmin(),blue.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(blue,threshold,HSV,cameraFeed);
			//then yellows
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,yellow.getHSVmin(),yellow.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(yellow,threshold,HSV,cameraFeed);
			//then reds
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,red.getHSVmin(),red.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(red,threshold,HSV,cameraFeed);
			//then greens
			cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(HSV,green.getHSVmin(),green.getHSVmax(),threshold);
			morphOps(threshold);
			trackFilteredObject(green,threshold,HSV,cameraFeed);

		}
		//show frames
		//imshow(windowName2,threshold);

		imshow(windowName,cameraFeed);
		//imshow(windowName1,HSV);

		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}
	return 0;
}
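morphOps is called throughout this main but is not defined in the excerpt; in this tutorial style it typically erodes with a small element to remove speckle noise and dilates with a larger one so the remaining blobs are easier to track. A sketch under that assumption (the element sizes are guesses, not the original values):

void morphOps(Mat &thresh){
	//create structuring elements: a small one for eroding noise away and a
	//larger one for dilating what is left
	Mat erodeElement = getStructuringElement(MORPH_RECT, Size(3, 3));
	Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));
	erode(thresh, thresh, erodeElement);
	erode(thresh, thresh, erodeElement);
	dilate(thresh, thresh, dilateElement);
	dilate(thresh, thresh, dilateElement);
}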
int main(int argc, const char* argv[])
{
    argc--;
    argv++;
    srand (time(NULL));
    
    db_ranking db;
    
    int change_rate = 10;
    if ( argc > 1 ){
        sscanf(argv[1], "%d", &change_rate);
    }
    
    diagnosis_phase_detector::phase phases[4] = {
        diagnosis_phase_detector::diagnosis_plain,
        diagnosis_phase_detector::diagnosis_green,
        diagnosis_phase_detector::diagnosis_hinselmann,
        diagnosis_phase_detector::diagnosis_schiller
    };
    
    map<diagnosis_phase_detector::phase, string> names;
    names[diagnosis_phase_detector::diagnosis_plain] = "plain";
    names[diagnosis_phase_detector::diagnosis_green] = "green";
    names[diagnosis_phase_detector::diagnosis_hinselmann] = "hinselmann";
    names[diagnosis_phase_detector::diagnosis_schiller] = "schiller";
    
    int current_phase_index = 0;
    diagnosis_phase_detector::phase cphase = phases[current_phase_index];
    
    // Parse the videos specified in the input file
    ifstream fin(argv[0]);
    string next_seq;
    //int counter = 5;
    int vindex = 0;
    
    while ( getline(fin, next_seq) /*&& counter-- > 0*/ ){
        cout << next_seq << endl;
        // Filename
        filenames.push_back(next_seq);
        
        // Index
        v_index.push_back(db.get_video_index(next_seq));
        
        // Sequence of frames
        map<diagnosis_phase_detector::phase, vector<int> > next_frames;
        get_sequence(next_seq.c_str(), next_frames); 
        frames.push_back(next_frames);
        
        // Frame generator
        map<diagnosis_phase_detector::phase, layer_generator> next_generator;
        get_generator(next_frames, next_generator);
        generators.push_back(next_generator);
        
        // Annotated instance
        anonadado::instance* next_instance = new anonadado::instance();
        next_instance->read(next_seq.c_str());
        instances.push_back(next_instance);
        cout << "Video " << vindex++ << " done." << endl;
    }
    
    fin.close();
    
    bool has = true;
    bool exit = false;
    bool go_to_next_phase = false;
    
    while ( !exit && has ){
        int remaining = change_rate;
        go_to_next_phase = false;
        cout << "\n\n\n NEEEEXT!\n\n\n";
        cout << current_phase_index << " " << cphase << endl;
        cout << endl << endl;
        
        for ( int level = 0; has && !exit && !go_to_next_phase; level++ )
        {
            cout << "LEVEL " << level << endl;
            //boost::this_thread::sleep( boost::posix_time::seconds(1) );
            
            vector< pair<int, pair< pair<int, int>,
                                    pair<int, int>
                                  >
                        >
                  > pairs;
            
            // Generate pairs <(video, frame), (video, frame)> for this level
            for ( size_t va = 0; va < instances.size(); va++ ){
                vector<int> framesl_a;
                generators[va][cphase].get_next(framesl_a, level);
                
                for ( size_t fa = 0; fa < framesl_a.size(); fa++){
                    for ( size_t vb = 0; vb < va; vb++ ){
                        vector<int> framesl_b;
                        generators[vb][cphase].get_next(framesl_b, level);
                        
                        for ( size_t fb = 0; fb < framesl_b.size(); fb++ ){
                            if ( va == vb && framesl_a[fa] == framesl_b[fb] ){
                                continue;
                            }

                            int priority = 
                                min(db.num_annotated_frames(v_index[va]),
                                    db.num_annotated_frames(v_index[vb]));

                            pairs.push_back(
                                make_pair(priority,
                                          make_pair(make_pair(va,
                                                              framesl_a[fa]),
                                                    make_pair(vb,
                                                              framesl_b[fb])
                                                   )
                                         )
                            );
                        }
                    }
                }
            }
            
            if ( pairs.size() == 0 ){
                has = false;
                break;
            } else {
                has = true;
            }
            
            // Randomly sort these pairs
            vector<int> change_priorities;
            sort(pairs.begin(), pairs.end());
            
            change_priorities.push_back(0);
            for (size_t i = 1; i < pairs.size(); i++){
                if ( pairs[i].first != pairs[i - 1].first ){
                    change_priorities.push_back(i);
                }
            }
            change_priorities.push_back(pairs.size());
            
            for (size_t i = 0; i + 1 < change_priorities.size(); i++){
                random_shuffle(pairs.begin() + change_priorities[i],
                               pairs.begin() + change_priorities[i + 1]);
            }

            // Eval these pairs
            for ( size_t i = 0; i < pairs.size() && !go_to_next_phase; i++ ){
                int va = pairs[i].second.first.first;
                int fa = pairs[i].second.first.second;
                
                int vb = pairs[i].second.second.first;
                int fb = pairs[i].second.second.second;
                
                cout << filenames[va] << " " << filenames[vb] << endl;
                cout << "(" << va << ":" << fa << ") "
                     << "(" << vb << ":" << fb << ") " << endl;
                
                if ( db.exists(cphase,
                               v_index[va], frames[va][cphase][fa],
                               v_index[vb], frames[vb][cphase][fb])
                   )
                {
                    continue;
                }
                
                better = -1;
                bool save = true;
                
                while ( true ){
                    Mat a, b, dst;
                    instances[va]->get_frame(frames[va][cphase][fa], a);
                    instances[vb]->get_frame(frames[vb][cphase][fb], b);
                    draw_ui(a, b, dst);
                    
                    imshow(names[cphase], dst);
                    
                    int key = waitKey(0) % 0x100;
                    if ( key == 81 ){               // left arrow
                        better = 1;
                    } else if ( key == 83 ){        // right arrow
                        better = 0;
                    } else if ( key == 32 ){        // space: accept and move on
                        break;
                    } else if ( key == 27 ){        // esc: quit
                        exit = true;
                        has = false;
                        break;
                    } else if ( key == 115 ){       // 's': skip without saving
                        save = false;
                        break;
                    } else {
                        better = -1;
                    }
                }
                
                if ( exit ){
                    break;
                }
                
                // Save the annotation
                if ( save ){
                    db.insert_annotation(cphase,
                                         v_index[va], frames[va][cphase][fa],
                                         v_index[vb], frames[vb][cphase][fb],
                                         better);
                }
                
                cout << "remaining " << remaining << endl;
                
                remaining--;
                if ( remaining <= 0 ){
                    go_to_next_phase = true;
                    break;
                }
            }
        }
        cout << "go to next\n";
        cvDestroyWindow(names[cphase].c_str());
        current_phase_index = (current_phase_index + 1) % 4;
        cphase = phases[current_phase_index];
        
    }
    
    cout << "Bye!\n";
    return 0;
}
int multiViewFaceDetection(Mat &srcImg, CascadeClassifier &faceCascade, CascadeClassifier &faceCascade2, vector<FacePositionInfo>& storeFpi)
{
	// If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
	int facetype = -1;
    Mat srcImgGray;
    if (srcImg.channels() == 3) {
        cvtColor(srcImg, srcImgGray, CV_BGR2GRAY);
    }
    else if (srcImg.channels() == 4) {
        cvtColor(srcImg, srcImgGray, CV_BGRA2GRAY);
    }
    else {
        // Access the input image directly, since it is already grayscale.
        srcImgGray = srcImg;
    }	
	//preprocess
    // Possibly shrink the image, to run much faster.
	int scaledWidth = SCALEDWIDTH;
    Mat inputImg;
    float scale = srcImgGray.cols / (float)scaledWidth;
    if (srcImgGray.cols > scaledWidth) {
        // Shrink the image while keeping the same aspect ratio.
        int scaledHeight = cvRound(srcImgGray.rows / scale);
        resize(srcImgGray, inputImg, Size(scaledWidth, scaledHeight));
    }
    else {
        // Access the input image directly, since it is already small.
        inputImg = srcImgGray;
    }
    // Standardize the brightness and contrast to improve dark images.
    Mat equalizedImg;
    equalizeHist(inputImg, equalizedImg);
	

	Rect faceRect;
	FacePositionInfo tmpfpi;
	// Find the largest face.
	int findface = 0;
#ifdef _DEBUG
	namedWindow("Debug");
#endif
	if(!findface){
		detectLargestObject_afterprocess(equalizedImg, scale, srcImgGray.cols, srcImgGray.rows, faceCascade, faceRect, scaledWidth);
		// Check if a face was detected.		
		if (faceRect.width > 0) {
			rect2FPI(faceRect,tmpfpi,0,srcImgGray.cols,srcImgGray.rows);
			storeFpi.push_back(tmpfpi);
			findface = 1;
			facetype = 1; // front face
		}
	}
	if(!findface){
		//turn to right
		detectLargestObject_afterprocess(equalizedImg, scale, srcImgGray.cols, srcImgGray.rows, faceCascade2, faceRect, scaledWidth);
		// Check if a face was detected.		
		if (faceRect.width > 0) {
#ifdef _DEBUG
			Rect faceRect2;
			faceRect2.x = cvRound(faceRect.x / scale);
            faceRect2.y = cvRound(faceRect.y / scale);
            faceRect2.width = cvRound(faceRect.width / scale);
            faceRect2.height = cvRound(faceRect.height / scale);
			rectangle(equalizedImg,faceRect2,CV_RGB(255,0,0),2,CV_AA);
			imshow("DEBUG",equalizedImg);
#endif
#ifdef _DEBUG
			printf("@@@@Find Profile Face -- RIGHT\n");
#endif
			rect2FPI(faceRect,tmpfpi,0,srcImgGray.cols,srcImgGray.rows);
			storeFpi.push_back(tmpfpi);
			findface = 1;
			facetype = 2; // right face
		}		
		//turn to left
		if(!findface){
			Mat equalizedImg_flip;
			flip(equalizedImg,equalizedImg_flip,1);
			detectLargestObject_afterprocess(equalizedImg_flip, scale, srcImgGray.cols, srcImgGray.rows, faceCascade2, faceRect, scaledWidth);
			// Check if a face was detected.		
			if (faceRect.width > 0) {
#ifdef _DEBUG
			Rect faceRect2;
			faceRect2.x = cvRound(faceRect.x / scale);
            faceRect2.y = cvRound(faceRect.y / scale);
            faceRect2.width = cvRound(faceRect.width / scale);
            faceRect2.height = cvRound(faceRect.height / scale);
			rectangle(equalizedImg_flip,faceRect2,CV_RGB(255,0,0),2,CV_AA);
			imshow("DEBUG",equalizedImg_flip);
#endif
				faceRect.x = cvRound(faceRect.x / scale);
				faceRect.width = cvRound(faceRect.width / scale);
				faceRect.x = equalizedImg_flip.cols - (faceRect.x + faceRect.width);				
				faceRect.x = cvRound(faceRect.x * scale);
				faceRect.width = cvRound(faceRect.width * scale);
#ifdef _DEBUG
				printf("@@@@Find Profile Face -- LEFT\n");
#endif
				rect2FPI(faceRect,tmpfpi,0,srcImgGray.cols,srcImgGray.rows);
				storeFpi.push_back(tmpfpi);
				findface = 1;
				facetype = 3; // left face
			}
		}
	}
	/*
	if(!findface){
		IplImage iplInput = equalizedImg;
		Mat equalizedImgRotated;	
		//equalizedImg.copyTo(equalizedImgRotated);
		IplImage iplInputRotated = equalizedImgRotated;
		int anglelist[] = {45,-45};
		int count = 0;
		while(!findface){
			if(count == sizeof(anglelist)/sizeof(int))
				break;		
			//GS_rotate(&iplInput, &iplInputRotated, anglelist[count]);    	
			rotateImageMat(equalizedImg,equalizedImgRotated,anglelist[count]);
#ifdef _DEBUG
			imshow( "DEBUG", equalizedImgRotated); 
#endif
			detectLargestObject_afterprocess(equalizedImgRotated, scale, srcImgGray.cols, srcImgGray.rows, faceCascade, faceRect,scaledWidth);		
			// Check if a face was detected.
			if (faceRect.width > 0) {
#ifdef _DEBUG
				printf("****Find Rotated Face\n");
#endif
				rect2FPI(faceRect,tmpfpi,anglelist[count],srcImgGray.cols,srcImgGray.rows);
				storeFpi.push_back(tmpfpi);
				findface = 1;
				facetype = 4 + count; // 4 -> 45' 5 -> -45'
			}
			count++;
		}
	}
	*/
    return facetype;
}
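The left-profile branch above un-mirrors the detection box by hand (scale down, flip, scale back up). The flip itself can be isolated in a helper; a sketch (assuming the rectangle is expressed in the coordinates of the image it is mirrored in):

// Mirror a detection rectangle across the vertical centre line of an image of
// the given width, as done above for the flipped left-profile search.
static Rect flipRectHorizontal(const Rect &r, int imageWidth)
{
	Rect out = r;
	out.x = imageWidth - (r.x + r.width);
	return out;
}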
Example #24
0
//---------------------------------------------------------------------
//	main method
//---------------------------------------------------------------------
void Windows::mainLoop()
{
	while (C_S_CAPTURE != END)
	{
		cap.updateFrame(&frame);

		if (C_S_CAPTURE == PRE_TRACK)
		{
			output = new Output();
			output->setFileName("output");
			C_S_CAPTURE = TRACK;
			clock.clearTime();
		}

		if (C_S_CAPTURE == TRACK)
			clock.startClock();
		
		tracking.updateFrames(frame, t_info);

		if (C_S_SELECTION == SELECTED_UNMARKED)
			tracking.setMarker(inputs->mouseSelection);

		if (C_S_CAPTURE != INIT)
		{
			imshow(windowName_Original, tracking.camshift());
		}else
		{
			if (HELP)
			{
				cv::Point2d point(0, 50);
				for (int i = 0; i < 5; i++, point.y+=50)
					cv::putText(frame, textHelp[i], point, cv::FONT_HERSHEY_PLAIN, 2, cv::Scalar(0, 0, 0, 0));
			}
			else if (WARNING_TYPE == TND)
			{
				cv::Point2d point(0, 130);
				cv::putText(frame, textHelp[0], point, cv::FONT_HERSHEY_PLAIN, 2, cv::Scalar(0, 0, 0, 0));
			}
			else if (WARNING_TYPE == WTA)
			{
				cv::Point2d point(0, 130);
				cv::putText(frame, textHelp[1], point, cv::FONT_HERSHEY_PLAIN, 2, cv::Scalar(0, 0, 0, 0));
			}


			imshow(windowName_Original, frame);
		}
			

		inputs->keyboard();

		if (C_S_CAPTURE == TRACK)
		{
			currentFrame++;
			clock.endClock();
			output->addMotion();
		}

		if (C_S_CAPTURE == STOP)
		{
			output->exportToBvh(clock.getFrameTime());
			output->closeBVHFile();
			delete output;
			clock.clearTime();
			C_S_CAPTURE = CALIBRATION;
		}
	}
}
// TODO: This is currently a dirty, filthy, no-good duplicate of the callback.
void run_april_tag_detection_and_processing(cv::Mat& image_gray)
{
#ifdef DEBUG_APRIL_LATENCY_PROFILING
  // Need a new way to profile
#endif //DEBUG_APRIL_LATENCY_PROFILING

#ifdef DEBUG_APRIL_PROFILING
  static int n_count = 0;
  static double t_accum = 0;

  ros::Time start = ros::Time::now();

  if ( (ros::Time::now() - prev_t).toSec() > 1.0 )
  {
    crop_image = false;
  }
#endif // DEBUG_APRIL_PROFILING

#ifdef DEBUG_ROS_APRIL
  bool curr_frame_cropped = false;
#endif

  vector<AprilTags::TagDetection> detections;
  vector<cv::Point> rect_corners;

  if (!new_tag_pos_prediction)
  {
    crop_image = false;
  }
  // Clear flag so we don't window the same place again
  new_tag_pos_prediction = false;

  if (crop_image)
  {
    rect_corners = calculate_roi_from_pose_estimate(window_centre_point);
    crop_image = fix_roi_boundaries(image_gray, rect_corners);
#ifdef DEBUG_ROS_APRIL
    curr_frame_cropped = crop_image; // record whether this frame was windowed
#endif
  }

  detections= m_tagDetector->extractTags(image_gray);


  for(int i = 0; i < (int) detections.size(); i++)
  {
    print_detection(detections[i]);
  }

#ifdef DEBUG_ROS_APRIL
  for (int i=0; i < (int) detections.size(); i++) {
    // also highlight in the image
    if(detections[i].id == 0)
    {
      detections[i].draw(image_gray);
    }
  }

  if (curr_frame_cropped)
  {
    cv::rectangle(image_gray, rect_corners[0], rect_corners[1], cv::Scalar(0,255,255), 3);
  }

  imshow("AprilResult", image_gray); // OpenCV call
  cv::waitKey(30);
#endif

#ifdef DEBUG_APRIL_PROFILING
  ros::Time end = ros::Time::now();
  n_count++;
  t_accum += (end - start).toSec();
  if (n_count >= 100)
  {
    ROS_DEBUG("Avg april tag run time: %f", t_accum/100.0);
    std::cerr << "Avg april tag run time: " << t_accum/100.0 << std::endl;
    n_count = 0;
    t_accum = 0;
  }
#endif // DEBUG_APRIL_PROFILING

#ifdef DEBUG_APRIL_LATENCY_PROFILING
  // Need a new way to profile
#endif //DEBUG_APRIL_LATENCY_PROFILING

}
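fix_roi_boundaries is not shown; judging from the call site, its job is to clamp the two ROI corners to the image and report whether a usable crop window remains. A hedged sketch of such a function (contract and structure are assumptions):

// Hypothetical fix_roi_boundaries: clamp both corners to the image and return
// false (meaning: do not crop) when the window degenerates.
bool fix_roi_boundaries(const cv::Mat &image, std::vector<cv::Point> &corners)
{
  if (corners.size() < 2) return false;
  for (size_t i = 0; i < 2; i++)
  {
    corners[i].x = std::min(std::max(corners[i].x, 0), image.cols - 1);
    corners[i].y = std::min(std::max(corners[i].y, 0), image.rows - 1);
  }
  return corners[1].x > corners[0].x && corners[1].y > corners[0].y;
}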
Example #26
0
void redraw()
{
	imshow(titleSrc, img);
	imshow(titleDest, imgRes);
}
Example #27
0
//Perform a background subtraction of one camera
void depthBackgroundSub_Par(KinectSensor* cam, ofstream* outDebug)
{
	char camId[20];
	_itoa(cam->getIdCam(), camId, 10);
	char windName_Back[50];
	strcpy(windName_Back, "Background subtraction ");
	strcat(windName_Back, camId);
	Mat backImg(Size(XN_VGA_X_RES, XN_VGA_Y_RES), CV_8UC1);
	namedWindow(windName_Back);

	cam->startDevice();

	bool stop = false;
	bool firstTime = true;

	int total = XN_VGA_Y_RES*XN_VGA_X_RES;
	BackgroundDepthSubtraction* subtractor;
	//allocate enough memory in advance (% of the total points)
	XnPoint3D* points2D = new XnPoint3D[MAX_FORGROUND_POINTS];	
	int numPoints = 0;
	int contFrames = 0;

	while (!stop)
	{
		//wait for the next frame to be ready
		cam->waitAndUpdate();
		//recover the depth map
		const XnDepthPixel* dm = cam->getDepthMap();

		//ptime time_start_wait(microsec_clock::local_time());
		if (contFrames == 0)//store the background model
			subtractor = new BackgroundDepthSubtraction(dm);
		else 
			numPoints = subtractor->subtraction(points2D, dm); //returns the number of foreground points
		
		//ptime time_end_wait(microsec_clock::local_time());
		//time_duration duration_wait(time_end_wait - time_start_wait);
		//(*outDebug) << "Time report(bgs "<< camId << "): " << duration_wait.total_microseconds() << endl;
				
		Utils::initMat1u(backImg, 0);
		subtractor->createBackImage(points2D, backImg, numPoints);

		//display image
		imshow(windName_Back, backImg);
		char c = cvWaitKey(1);
		stop = (c == 27) || (contFrames == 250);
		
//		stop = (contFrames == 250);
		//for recorded videos
	//	if (cam->getDepthNode()->GetFrameID() == 1)
	//		if (firstTime ? firstTime = false : stop = true);

		contFrames++;
	}
	//ptime time_end(microsec_clock::local_time());
	//time_duration duration(time_end - time_start);
	//double totalSecs = duration.total_microseconds()/1000000;
	//double fps = contFrames/totalSecs;
	//cout << "Fps: " << fps << endl;

	cam->stopDevice();

	//free memory
	delete [] points2D;	//allocated with new[]
	delete subtractor;
}
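The foreground test itself lives in BackgroundDepthSubtraction::subtraction. A typical depth background subtraction keeps the first depth map as the model and flags pixels that are measurably closer than the model, ignoring zeros, which mean "no reading" on a Kinect. A sketch under those assumptions (the function name and threshold value are guesses):

// Hypothetical core of the subtraction step: compare the current depth map
// against the stored background and collect foreground pixel positions.
int subtractDepth(const XnDepthPixel* background, const XnDepthPixel* current,
                  XnPoint3D* points2D, int maxPoints)
{
	const int DEPTH_THRESHOLD_MM = 70;	//assumed sensitivity
	int n = 0;
	for (int y = 0; y < XN_VGA_Y_RES && n < maxPoints; y++)
	{
		for (int x = 0; x < XN_VGA_X_RES && n < maxPoints; x++)
		{
			int idx = y*XN_VGA_X_RES + x;
			if (current[idx] == 0)	//zero depth = no measurement
				continue;
			if ((int)background[idx] - (int)current[idx] > DEPTH_THRESHOLD_MM)
			{	//closer than the background model -> foreground
				points2D[n].X = (XnFloat)x;
				points2D[n].Y = (XnFloat)y;
				points2D[n].Z = (XnFloat)current[idx];
				n++;
			}
		}
	}
	return n;
}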
Example #28
0
int main(int argc, char **argv) 
{
    int c;	// getopt returns int, so c must not be a plain char
    char *filepath = NULL;
    int cut_horizontal = 0;
    int cut_vertical = 0;
    int timed = 0;
    int show = 0;

    while ((c = getopt(argc, argv, "f:h:v:ts")) != -1) {
        switch (c) {
        case 'f': filepath = optarg; break;
        case 'h': cut_horizontal = (int)strtol(optarg, NULL, 10); break;
        case 'v': cut_vertical = (int)strtol(optarg, NULL, 10); break;
        case 't': timed = 1; break;
        case 's': show = 1; break;
        default: exit(1); 
        }
    }

    // OpenCL boilerplate
    std::string ..._kernel_str;

    std::string ..._name_str = std::string("...");
    std::string ..._kernel_file = std::string("...");

    cl_vars_t cv; 
    cl_kernel ...;

    readFile(..._kernel_file, ..._kernel_str);

    initialize_ocl(cv);

    compile_ocl_program(..., cv, ..._kernel_str.c_str(), ..._name_str.c_str());

    // Read image
    Mat_<Vec3b> image = imread(filepath);

    if (!image.data) {
        cout << "Invalid input";
        image.release();
        return -1;
    }

    if (show) {
        imshow("Original Image", image);
    }

    SeamCarver s(image);

    // imshow("Gradient", s.energy);
    // Mat tmp = s.energy/195075.0*255.0;
    // s.energy.convertTo(tmp,CV_8U,-1);
    // imwrite("bench_gradient.jpg", tmp);
    // vector<uint> sm = s.findVerticalSeam();
    // s.showVerticalSeam(sm);


    // Carving happens here
    double start = get_time();
    ...;
    double elapsed = get_time() - start;
    // --------------------

    // double start = get_time();
    // for (int i = 0; i < cut_horizontal; ++i) {
    //     vector<uint> seam = s.findHorizontalSeam();
    //     // s.showHorizontalSeam(seam);
    //     s.removeHorizontalSeam(seam);
    // }
    // for (int i = 0; i < cut_vertical; ++i) {
    //     vector<uint> seam = s.findVerticalSeam();
    //     // s.showVerticalSeam(seam);
    //     s.removeVerticalSeam(seam);
    // }
    // double elapsed = get_time() - start;

    if (timed) {
        printf("Elapsed time: %.3lf seconds\n", elapsed);
    }

    Mat_<Vec3b> output = s.getImage();
    imwrite("scarved.jpg", output);

    if (show) {
        imshow("Carved Image", output);
        while (waitKey(20) != 27);
    }

    // cout << "Seam Length: " << seam.size() << endl;
    // s.showImage();
    // s.showEnergy();

    // imwrite("bench_carved.jpg", s.getImage());

    // for (int i = 0; i < 5; ++i) {
    //     for (int j = 0; j < 5; ++j) {
    //         cout << s.energy.at<uint32_t>(i,j) << " ";
    //     }
    //     cout << endl;
    // }

    uninitialize_ocl(cv);

    ...;

    clReleaseMemObject(...); 

    image.release();

    return 0;
}
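The OpenCL kernel names and the carving call are redacted ("...") in this listing, and the commented-out CPU path only names the steps. For reference, a hedged sketch of the dynamic programming behind a findVerticalSeam, assuming a single-channel energy map where each seam pixel adds the cheapest of its three upper neighbours (SeamCarver's real internals may differ):

vector<uint> findVerticalSeamDP(const Mat &energy)
{
    Mat cost;
    energy.convertTo(cost, CV_64F);
    // forward pass: accumulate the cheapest path cost from the top row
    for (int y = 1; y < cost.rows; ++y) {
        for (int x = 0; x < cost.cols; ++x) {
            double best = cost.at<double>(y - 1, x);
            if (x > 0)             best = std::min(best, cost.at<double>(y - 1, x - 1));
            if (x < cost.cols - 1) best = std::min(best, cost.at<double>(y - 1, x + 1));
            cost.at<double>(y, x) += best;
        }
    }
    // backtrack from the cheapest pixel in the bottom row
    vector<uint> seam(cost.rows);
    Point minLoc;
    minMaxLoc(cost.row(cost.rows - 1), NULL, NULL, &minLoc, NULL);
    int x = minLoc.x;
    for (int y = cost.rows - 1; y >= 0; --y) {
        seam[y] = x;
        if (y == 0) break;
        int bestX = x;
        for (int dx = -1; dx <= 1; ++dx) {
            int nx = x + dx;
            if (nx >= 0 && nx < cost.cols &&
                cost.at<double>(y - 1, nx) < cost.at<double>(y - 1, bestX))
                bestX = nx;
        }
        x = bestX;
    }
    return seam;
}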
Example #29
0
	float SaliencyComputer::ComputeSegmentSaliency(const Mat& img, const SegSuperPixelFeature& sp_feat, SaliencyType type)
	{

		if( type == Composition )
		{

		}
		if( type == CenterSurroundHistogramContrast )
		{

			if(lab_img.empty())
				cvtColor(img, lab_img, CV_BGR2Lab);

			// ignore too-big segments (currently disabled)
			/*if(sp_feat.area >= 0.6*img.rows*img.cols)
				return -1;*/

			// define surround context
			// bounding box with 2X width and height
			Rect segment_box = sp_feat.box;
			Point tl_pts(segment_box.tl().x-segment_box.width, segment_box.tl().y-segment_box.height);
			tl_pts.x = MAX(0, tl_pts.x);
			tl_pts.y = MAX(0, tl_pts.y);
			Point br_pts(segment_box.br().x+segment_box.width, segment_box.br().y+segment_box.height);
			br_pts.x = MIN(br_pts.x, img.cols-1);
			br_pts.y = MIN(br_pts.y, img.rows-1);
			Rect context_box(tl_pts, br_pts);
			context_box = Rect(0, 0, img.cols, img.rows);	// NOTE: overrides the 2x box above; the whole image is used as context

			// show boxes
			Mat temp_img = img.clone();
			rectangle(temp_img, segment_box, CV_RGB(0,255,0), 1);
			rectangle(temp_img, context_box, CV_RGB(255,255,0), 1);
			imshow("saliency", temp_img);
			waitKey(10);

			// compute context feature
			int count = 0;
			vector<float> context_feat(quantBins[0]+quantBins[1]+quantBins[2], 0);
			for(int y=context_box.tl().y; y<context_box.br().y; y++)
			{
				for(int x=context_box.tl().x; x<context_box.br().x; x++)
				{
					if( SegSuperPixelFeature::InsideSegment(Point(x,y), sp_feat) )
						continue;

					/*Vec3b val = img.at<Vec3b>(y,x);
					float b = val.val[0];
					float g = val.val[1];
					float r = val.val[2];
					int bbin = (int)(b/(255.f/quantBins[0]));
					bbin = ( bbin > quantBins[0]-1? quantBins[0]-1: bbin );
					int gbin = (int)(g/(255.f/quantBins[1]));
					gbin = ( gbin > quantBins[1]-1? quantBins[1]-1: gbin );
					int rbin = (int)(r/(255.f/quantBins[2]));
					rbin = ( rbin > quantBins[2]-1? quantBins[2]-1: rbin );

					context_feat[bbin]++;
					context_feat[quantBins[0]+gbin]++;
					context_feat[quantBins[0]+quantBins[1]+rbin]++;*/

					Vec3b val = lab_img.at<Vec3b>(y,x);
					float l = val.val[0];
					float a = val.val[1];
					float b = val.val[2];
					int lbin = (int)(l/(255.f/quantBins[0]));
					lbin = ( lbin > quantBins[0]-1? quantBins[0]-1: lbin );
					int abin = (int)(a/(255.f/quantBins[1]));
					abin = ( abin > quantBins[1]-1? quantBins[1]-1: abin );
					int bbin = (int)(b/(255.f/quantBins[2]));
					bbin = ( bbin > quantBins[2]-1? quantBins[2]-1: bbin );

					context_feat[lbin]++;
					context_feat[quantBins[0]+abin]++;
					context_feat[quantBins[0]+quantBins[1]+bbin]++;
					count+=3;
				}
			}

			// do normalization
			vector<float> feat1(context_feat.size(), 0);	// segment
			vector<float> feat2(context_feat.size(), 0);	// context
			for(size_t i=0; i<sp_feat.feat.size(); i++)
			{
				feat1[i] = sp_feat.feat[i] / (sp_feat.area*3);
				feat2[i] = context_feat[i] / count;
			}

			// compute distance
			float dist = 0;
			for(size_t i=0; i<feat1.size(); i++)
				dist += (feat1[i]-feat2[i])*(feat1[i]-feat2[i]);
			dist = sqrt(dist);

			if(dist > 1)
				cout<<"error"<<endl;

			return dist;

		}

		// other saliency types are not handled here
		return 0;
	}
Example #30
0
int _tmain(int argc, _TCHAR* argv[])
{

	GLFWwindow* window = 0;
	glfwSetErrorCallback(glfw_error_callback_func);


	// Initialise GLFW
	if (!glfwInit())
	{
		fprintf(stderr, "Failed to initialize GLFW\n");
		getchar();
		return -1;
	}

	//-----------------------------------------------------------------------------
	glfwWindowHint(GLFW_SAMPLES, 4);

	// GL3.3 Core profile
	glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
	glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
	//	glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
	glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);

	glfwWindowHint(GLFW_VISIBLE, 0);	//off-screen rendering

	// Open a window and create its OpenGL context
	window = glfwCreateWindow(1, 1, "GPGPU Test", NULL, NULL);
	if (window == NULL){
		fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
		getchar();
		glfwTerminate();
		return -1;
	}
	glfwMakeContextCurrent(window);

#if defined _WIN32
	// Initialize GLEW
	glewExperimental = GL_TRUE;			// !!!! important: required for the core profile
	if (glewInit() != GLEW_OK) {
		fprintf(stderr, "Failed to initialize GLEW\n");
		getchar();
		glfwTerminate();
		return -1;
	}
#endif


	{
		cout << "GL_VENDOR:" << glGetString(GL_VENDOR) << endl;
		cout << "GL_RENDERER:" << glGetString(GL_RENDERER) << endl;
		cout << "GL_VERSION:" << glGetString(GL_VERSION) << endl;
		cout << "GL_SHADING_LANGUAGE_VERSION:" << glGetString(GL_SHADING_LANGUAGE_VERSION) << endl;

	}

//	Mat imgSrc = Mat(Size(32, 24), CV_32FC1);
	Mat imgSrc = Mat(Size(8, 4), CV_32FC1);
	Mat imgDst = Mat::zeros(imgSrc.size(), imgSrc.type());

	//---------------------------------
	//init Src image
	{
		const int width = imgSrc.cols;
		const int height = imgSrc.rows;

		for (int y = 0; y < height; y++){
			for (int x = 0; x < width; x++){
				imgSrc.at<float>(y,x) = y*100.0f + x;
			}
		}
	}


	//---------------------------------
	//Execute GPGPU
	{
		const int width = imgSrc.cols;
		const int height = imgSrc.rows;


		// Create and compile our GLSL program from the shaders
		GLuint programID = LoadShaders("GpGpuVertexShader.vertexshader", "GpGpuFragmentShader.fragmentshader");

		// FBO identifier
		GLuint fbo = 0;

		//---------------------------------
		// FBO
		// create FBO (off-screen framebuffer)
		glGenFramebuffers(1, &fbo);

		// bind offscreen framebuffer (that is, skip the window-specific render target)
		glBindFramebuffer(GL_FRAMEBUFFER, fbo);

		// texture

		enum E_TextureID{
			SRC,
			DST,
			SIZEOF,
		};

		unsigned int textureID[E_TextureID::SIZEOF];	//src dst
		//---------------------------------
		// CreateTexture
		{
			GLenum format = GL_RED;				//single channel
			GLenum type = GL_FLOAT;				//float
			GLenum internalFormat = GL_R32F;	//single channel float

			glGenTextures(sizeof(textureID) / sizeof(textureID[0]), textureID); // create (reference to) a new texture

			for (int i = 0; i < sizeof(textureID) / sizeof(textureID[0]); i++){
				glBindTexture(GL_TEXTURE_2D, textureID[i]);
				// (set texture parameters here)
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
				glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

				//create the texture
				glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, 0);

				glBindTexture(GL_TEXTURE_2D, 0);
			}

		}

		//upload imgSrc to texture
		{
			GLenum format = GL_RED;				//single channel
			GLenum type = GL_FLOAT;				//float
			void* data = imgSrc.data;

			glBindTexture(GL_TEXTURE_2D, textureID[E_TextureID::SRC]);
			glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, data);
			glBindTexture(GL_TEXTURE_2D, 0);
		}


		//Execute
		{
			glUseProgram(programID);

			GLuint vao;
			GLuint vbo;

			// a square spanning [-1, 1]
			static GLfloat position[][2] = {
				{ -1.0f, -1.0f },
				{ 1.0f, -1.0f },
				{ 1.0f, 1.0f },
				{ -1.0f, 1.0f }
			};

			// create vao&vbo
			glGenVertexArrays(1, &vao);
			glGenBuffers(1, &vbo);

			// bind vao & vbo
			glBindVertexArray(vao);
			glBindBuffer(GL_ARRAY_BUFFER, vbo);

			// upload vbo data
			glBufferData(GL_ARRAY_BUFFER, (int)sizeof(position), position, GL_STATIC_DRAW);

			// Set VertexAttribute
			GLint attrLoc = glGetAttribLocation(programID, "position");
			glEnableVertexAttribArray(attrLoc);	//enable attribute Location
			glVertexAttribPointer(
				attrLoc,			// attribute location queried above; must match "position" in the shader
				2,					// size	(Specifies the number of components) x,y
				GL_FLOAT,			// type
				GL_FALSE,			// normalized?
				0,					// stride (Specifies the byte offset between consecutive generic vertex attributes)
				(void*)0			// array buffer offset (Specifies a pointer to the first generic vertex attribute in the array)
				);

			//Bind Texture & Fbo
			const int textureUnit = 0;
			glActiveTexture(GL_TEXTURE0 + textureUnit);
			glBindTexture(GL_TEXTURE_2D, textureID[E_TextureID::SRC]);
			glUniform1i(glGetUniformLocation(programID, "texSrc"), textureUnit);
			glUniform2f(glGetUniformLocation(programID, "texSrcSize"),width,height);
			glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID[E_TextureID::DST], 0);



			//Viewport
			glViewport(0, 0, width, height);

			//Render!!
			glDrawArrays(GL_TRIANGLE_FAN, 0, (int)(sizeof(position) / sizeof(position[0])));

			glFlush();

			// delete vao&vbo
			glBindVertexArray(0);
			glDeleteVertexArrays(1, &vao);
			glDeleteBuffers(1, &vbo);
		}

		{	//download from framebuffer

			GLenum format = GL_RED;				//single channel
			GLenum type = GL_FLOAT;				//float
			void* data = imgDst.data;
			int width = imgDst.cols;
			int height = imgDst.rows;


			//wait for Rendering
			glFinish();

			// ReadBuffer
			glReadBuffer(GL_COLOR_ATTACHMENT0);

			// ReadPixels
			glReadPixels(0, 0, width, height, format, type, data);
		}

		//clean up
		glDeleteFramebuffers(1, &fbo);
		glDeleteTextures(sizeof(textureID) / sizeof(textureID[0]), textureID);
		glDeleteProgram(programID);
	}

	//dump 
	{
		cout << "imgSrc" << endl;
		cout << imgSrc << endl;

		cout << "imgDst" << endl;
		cout << imgDst << endl;
	}

	//verify
	int errNum = 0;
	{
		//verify
		int width = imgSrc.cols;
		int height = imgSrc.rows;
		for (int y = 0; y < height; y++){
			for (int x = 0; x < width; x++){
				float src = imgSrc.at<float>(y, x);
				float dst = imgDst.at<float>(y, x);
				if (src != dst) errNum++;
			}
		}
		cout << "ErrNum:" << errNum << endl;
	}

#if 0
	//visualize
	{
		imshow("src", imgSrc);
		imshow("dst", imgDst);
		waitKey();
	}
#endif

	// Close OpenGL window and terminate GLFW
	glfwTerminate();
	
	cout << "Hit return key" << endl;
	cin.get();

	return errNum;
}
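The verification expects imgDst to equal imgSrc, so the GpGpu shaders presumably just pass the source texel through. A hedged sketch of a matching GLSL 3.30 pair, written here as C++ string constants (the actual shader files are not included in this listing and may differ):

// Hypothetical pass-through shaders consistent with the check above.
static const char* kGpGpuVertexShader = R"(#version 330 core
in vec2 position;
void main() { gl_Position = vec4(position, 0.0, 1.0); }
)";

static const char* kGpGpuFragmentShader = R"(#version 330 core
uniform sampler2D texSrc;
uniform vec2 texSrcSize;
out float color;
void main() {
	// gl_FragCoord.xy is the pixel centre; dividing by the texture size maps
	// it onto the matching texel of texSrc
	color = texture(texSrc, gl_FragCoord.xy / texSrcSize).r;
}
)";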