Example 1
void StereoSingleGpu::compute(const Mat& leftFrame, const Mat& rightFrame, Mat& disparity)
{
    cuda::setDevice(deviceId_);
    d_leftFrame.upload(leftFrame);
    d_rightFrame.upload(rightFrame);
    d_alg->compute(d_leftFrame, d_rightFrame, d_disparity);
    d_disparity.download(disparity);
}
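The pattern above (select the device, upload the inputs, run the algorithm, download the result) is how a per-GPU worker is typically driven from host code. A minimal usage sketch, under the assumption that the wrapper's constructor takes a CUDA device id; the file names are hypothetical:

Mat left = imread("left.png", IMREAD_GRAYSCALE);   //hypothetical input images
Mat right = imread("right.png", IMREAD_GRAYSCALE);
Mat disparity;
StereoSingleGpu matcher(0); //assumed constructor: the CUDA device id
matcher.compute(left, right, disparity); //upload, match, download in one call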
Example 2
void cv::cuda::syncOutput(const GpuMat& dst, OutputArray _dst, Stream& stream)
{
#ifndef HAVE_CUDA
    (void) dst;
    (void) _dst;
    (void) stream;
    throw_no_cuda();
#else
    if (_dst.kind() != _InputArray::CUDA_GPU_MAT)
    {
        if (stream)
            dst.download(_dst, stream);
        else
            dst.download(_dst);
    }
#endif
}
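syncOutput is the tail end of a common pattern inside OpenCV's CUDA module: compute into a GpuMat, then copy back only when the caller handed in host memory (when _dst is already a CUDA_GPU_MAT, the result is assumed to be there already). A hedged sketch of the calling pattern; myCudaOp is hypothetical, and getInputMat/getOutputMat are assumed to be the companion helpers from the same private header:

void myCudaOp(InputArray _src, OutputArray _dst, Stream& stream)
{
    GpuMat src = getInputMat(_src, stream); //assumed helper: wraps or uploads the input
    GpuMat dst = getOutputMat(_dst, src.rows, src.cols, src.type(), stream); //assumed helper
    // ... launch CUDA kernels that write into dst ...
    syncOutput(dst, _dst, stream); //downloads only when _dst is host memory
}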
Example 3
int main( int argc, const char** argv )
{

	VideoCapture cap;
	Rect trackWindow;

	struct timeval timea, timeb, timeS, timeE;
	long totalTime = 0, matchTime = 0, convertTime = 0, loadTime = 0;
	int nFrames = 0;

	cap.open("/home/ubuntu/Aerial/photos/SoccerGoal2_464.mp4"); //open smaller video file (recommended for Jetson)
//	cap.open("/home/scott/Aerial//aerial_navigation/photos/SoccerGoal2.mp4"); //open regular video file (desktop)

	cerr << cap.get(CV_CAP_PROP_FRAME_WIDTH) << endl;
	cerr << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
	vector<string> screenshots;
	//smaller training images (Jetson)
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh1_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh2_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh3_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh4_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh5_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh6_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh7_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh8_464.png");
	//regular training images (desktop)
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh1.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh2.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh3.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh4.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh5.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh6.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh7.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh8.png");


	if( !cap.isOpened() ) //make sure video file could be opened
	{
		cout << "***Could not initialize capturing...***\n";
		return -1;
	}
	//define the shape used for erode and dilate; change the size to increase or decrease the amount eroded and dilated
	Mat element = getStructuringElement(element_shape, Size(3, 3), Point(-1, -1) );

	//Initialize Kalman filter
	KalmanFilter KF(4, 2, 0);
	Mat_<float> measurement(2,1); measurement.setTo(Scalar(0));
	Point pt(0, 0);

	//initialize display window
	namedWindow( "TrackingWicket", 0 );
	setMouseCallback( "TrackingWicket", onMouse, 0 );

	Rect bb; //rectangle used for masking the image to decrease template match search time

	//state variables
	bool paused = false;
	bool debug = true;

	cap >> frame0; //load the first frame
	paused = true; //paused for training
	vector<int> index(8); //indexes of the training images
	Point2f ctr_point, kal_point;

	ctr_point = pt; //point for the measured center of the matched image
	kal_point = pt; //point for the corrected Kalman filter estimate

	//Gather training images
	for(int i = 0; i < screenshots.size(); i++){
		sh = imread(screenshots[i]); //read in the training image file

		for(;;){

			gpu_frame0.upload(sh); //upload image to gpu memory
			proccess_frame(element, thresh); //process the frame prior to selection

			gpu_gray.download(image); //download processed image so it can be displayed
			if(trackObject < 0) { //part of image has been selected so get the trained image

				mask_coll[i] = GpuMat(gpu_gray.size(), CV_8UC1, Scalar::all(0)); //initialize a mask
				mask_coll[i](selection).setTo(Scalar::all(255)); //set the mask to be the selected area
				gpu::bitwise_and(gpu_gray, mask_coll[i], train_coll[i]); //set the image to be only the parts in the mask
				train_coll[i] = train_coll[i](selection); //crop the training image to just the size of the selection. I'm not sure this process is the best way
				selections[i] = selection; //save the selection value for later use
				index[i] = i; // save the index value
				trackObject = 0; //set track object to 0 so we don't repeat this process until we have selected an object
				selectObject = 0; //reset the selection object state to no object
				break;
			}
			if( selectObject && selection.width > 0 && selection.height > 0 ) //if selecting an object show the area being selected
			{
				Mat mask(image, selection);
				bitwise_not(mask, mask);
			}
			imshow("TrackingWicket", image); //display the image
			waitKey(10);
		}
		break; //NOTE: this breaks after the first screenshot, so only one training image is actually gathered
	}
	//loop over frames in video feed (breaks at end of file)
	for(;;)
	{
		gettimeofday(&timea, NULL); //start overall timer
		if( !paused )
		{
			gettimeofday(&timeS, NULL); //start image load timer
			cap >> frame0; //load next frame
			gettimeofday(&timeE, NULL); //end image load timer
			loadTime += getTimeDelta(timeS, timeE); //add to the load time
			nFrames++; //increment the frames processed count
			if( frame0.empty() ) //make sure we have a frame stored
				break;
		}
		if( !paused ) //skip if paused
		{
			if(trackObject < 0) { //if this is the first pass through tracking, do some initialization
				//set point p to be the center of the selected area
				Point p = Point(selection.tl().x + (selection.width / 2), selection.tl().y + (selection.height / 2));
				bb = selection; //bounding box for the search area is the selection

				kalman_init(KF, p, 1e-4, 1e-4, .1); //initialize the Kalman filter

				ctr_point = pt;
				kal_point = pt;

				trackObject = 1; //set this so we don't come through here again
			}
			if(trackObject){

				Mat prediction = KF.predict(); //predict where the center of the match will be
				Point predictPt(prediction.at<float>(0),prediction.at<float>(1)); //get the point
				bool smallwindow = false;
				if(predictPt.x != 0 || predictPt.y != 0){ //if the prediction isn't the origin (an uninitialized filter), use the predicted point to position the search box
					smallwindow = true;
					selection.x = predictPt.x - (selection.width/2);
					selection.y = predictPt.y - (selection.height/2);
				}

				gettimeofday(&timeS, NULL); //start convert timer
				gpu_frame0.upload(frame0); //upload frame to gpu memory
				proccess_frame(element, thresh); //process the frame

				gettimeofday(&timeE, NULL); //stop convert timer
				convertTime += getTimeDelta(timeS, timeE);
				//gpu_gray.download(gray);

				double best_max_value = 0;
				Point best_location;
				Rect predictRect;
				int idx = 0;
				gettimeofday(&timeS, NULL); //start match template timer
				if(smallwindow){ //if we are using a small window to search for the template

					//expand the search box to three times the size of the training image/selection
					int wt = 1.5*selection.width;
					int ht = 1.5*selection.height;
					//search area centered on the predicted point (Rect takes x, y, width, height)
					predictRect = Rect(predictPt.x - wt, predictPt.y - ht, 2*wt, 2*ht);
					predictRect &= Rect(0, 0, gpu_gray.cols, gpu_gray.rows); //clamp to the image bounds

					GpuMat roi(gpu_gray, predictRect); //get area of image we want to search

					match_template(roi, train_coll, index, best_max_value, best_location, idx); //run template match

				} else //search the whole image (slow)
					match_template(gpu_gray, train_coll, index, best_max_value, best_location, idx); //run template match

				gettimeofday(&timeE, NULL); //end template match timer
				matchTime += getTimeDelta(timeS, timeE);

				if (best_max_value > .8){ //if the match score was better than .8 then update the found location; otherwise we didn't find a good enough spot (this threshold is not tuned and can be changed)
					if(smallwindow){
						best_location.x = best_location.x + predictRect.tl().x;
						best_location.y = best_location.y + predictRect.tl().y;
						bb = Rect(best_location.x,best_location.y, selections[index[idx]].width, selections[index[idx]].height);//box is now the size of the matched image and the location of the best fit
						box_update(KF, bb, measurement, ctr_point, kal_point); //update the current location of the image and bounding box
					} else {
						bb = Rect(best_location.x,best_location.y, selections[index[idx]].width, selections[index[idx]].height);
						box_update(KF, bb, measurement, ctr_point, kal_point);
					}
				} else //the object wasn't in our window so search the whole image to find it.
					smallwindow = false;

			}
		}


		if( trackObject < 0 ) {
			paused = false;
		}

		gettimeofday(&timeb, NULL); //stop total timer
		totalTime += getTimeDelta(timea, timeb);
		if(debug){ //if debugging, display the image with rectangles showing where the Kalman filter (red) thinks the best spot is and where the match (yellow) was found
			frame0.copyTo(image);
			rectangle(image, selection, Scalar(0, 0, 255), 1, 8, 0);
			rectangle(image, bb, Scalar(0, 255, 255), 1, 8, 0);
			circle( image, kal_point, 4, Scalar(0, 0, 255), -1, 8, 0 );
			circle( image, ctr_point, 4, Scalar(0, 255, 255), -1, 8, 0 );
			imshow( "TrackingWicket", image );
		}

		char c = (char)waitKey(10);
		if( c == 27 )
			break;
		switch(c)
		{
		case 'c':
			trackObject = 0;
			break;
		case 'd':
			debug = !debug;
			break;
		case 'p':
			paused = !paused;
			cout << "frames                       : " << nFrames << endl;
			cout << "TotalTime                    : " << double(totalTime)/1000000.0 << endl;
			cout << "FPS                          : " << double(nFrames)/(double(totalTime)/1000000.0) << endl;
			cout << "Percentage Convert Time      : " << double(convertTime)/double(totalTime) << endl;
			cout << "Percentage Match Time        : " << double(matchTime)/double(totalTime) << endl;
			cout << "Percentage Load Time         : " << double(loadTime)/double(totalTime) << endl;
			totalTime = matchTime = convertTime = loadTime = 0;
			nFrames = 0;
			break;
		default:
			;
		}
	}

	return 0;
}
Example 4
static void download(const GpuMat& d_mat, vector<uchar>& vec)
{
    vec.resize(d_mat.cols);
    Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);
    d_mat.download(mat);
}
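This helper assumes a single-row CV_8UC1 GpuMat, such as the status vector produced by the CUDA sparse optical flow routines, and wraps the vector's storage in a Mat header so that download() writes straight into it. A hedged usage sketch, with d_status assumed to be a 1xN CV_8UC1 GpuMat:

vector<uchar> status;
download(d_status, status); //status[i] != 0 marks a successfully tracked point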
Example 5
cv::Mat::Mat(const GpuMat& m) : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows)
{
    m.download(*this);
}
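Because this conversion constructor calls download() internally, a device image can be brought to the host by plain construction; the copy is synchronous. A minimal sketch:

cv::cuda::GpuMat d_img(480, 640, CV_8UC1, cv::Scalar(0));
cv::Mat host(d_img); //blocking device-to-host copy via download()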
Example 6
static void download(const GpuMat& d_mat, vector<Point2f>& vec)
{
    vec.resize(d_mat.cols);
    Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);
    d_mat.download(mat);
}
Example 7
void _ObjectDetector::findObjectByContour(void)
{
	int i;
	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;
	Rect boundRect;

	//DEMO
	if(m_bOneImg==1)
	{
		boundRect.height = m_Mat.size().height - 50;
		boundRect.width = boundRect.height;
		boundRect.x = (m_Mat.size().width - boundRect.width)*0.5;
		boundRect.y = (m_Mat.size().height - boundRect.height)*0.5;

		m_pClassMgr->addObject(get_time_usec(),&m_Mat,&boundRect,NULL);
		return;
	}

	return; //NOTE: the contour-based detection below is currently unreachable (disabled by this early return)
//	m_pContourFrame->switchFrame();
	GpuMat* pThr = m_pContourFrame->getGMat();

	m_pCanny->detect(*m_pGray, *pThr);

	// Detect edges using Threshold
//	cuda::threshold(*m_pGray, *pThr, 200, 255, THRESH_BINARY);
	pThr->download(m_contourMat);

	// Find contours
	findContours(m_contourMat, contours, hierarchy, CV_RETR_TREE,
			CV_CHAIN_APPROX_SIMPLE);
	//	findContours(m_frame, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// Approximate contours to polygons + get bounding rects
	vector<vector<Point> > contours_poly(contours.size());

	for (i = 0; i < contours.size(); i++)
	{
		approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);

		boundRect = boundingRect(Mat(contours_poly[i]));
		if (boundRect.area() < 5000)
			continue;

		int extraW = boundRect.width * 0.15;
		int extraH = boundRect.height * 0.15;

		boundRect.x -= extraW;
		boundRect.y -= extraH;
		if (boundRect.x < 0)
			boundRect.x = 0;
		if (boundRect.y < 0)
			boundRect.y = 0;

		boundRect.width += extraW + extraW;
		boundRect.height += extraH + extraH;

		int overW = m_Mat.cols - boundRect.x - boundRect.width;
		int overH = m_Mat.rows - boundRect.y - boundRect.height;
		if (overW < 0)
			boundRect.width += overW;
		if (overH < 0)
			boundRect.height += overH;

		m_pClassMgr->addObject(get_time_usec(),&m_Mat,&boundRect,&contours_poly[i]);
	}

}
Example 8
int Transformer::recognize(const GpuMat& img){
	Mat img_host;
	img.download(img_host);
	return ocr->predict(img_host);
}
Example 9
Mat visionUtils::cannySegmentation(Mat img0, int minPixelSize, bool displayFaces)
{
    // Segments items in a gray image (img0).
    // minPixelSize:
    //   -1  returns the largest region only
    //   >0  removes regions with fewer than minPixelSize pixels
    //    0  returns all detected segments


    // LB: Zero pad image to remove edge effects when getting regions....
    int padPixels=20;
    // Rect border added at start...
    Rect tempRect;
    tempRect.x=padPixels;
    tempRect.y=padPixels;
    tempRect.width=img0.cols;
    tempRect.height=img0.rows;

    Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
    img0.copyTo(img1(tempRect));


    if (useGPU)// converted to GPU -> NOT tested to speed up here!
    {
        GpuMat imgGPU;
        imgGPU.upload(img1);
#if CV_MAJOR_VERSION == 2
        gpu::Canny(imgGPU, imgGPU, 100, 200, 3);
#elif CV_MAJOR_VERSION == 3
        cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(100, 200, 3);
        canny->detect(imgGPU, imgGPU);
#endif
        imgGPU.download(img1);
    }
    else
    {
        Canny(img1, img1, 100, 200, 3);
    }


    // find the contours
    vector< vector<Point> > contours;
    findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // Mask for segmented regions
    Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);

    vector<double> areas(contours.size());

    if (minPixelSize==-1)
    {   // Case of taking largest region
        for(int i = 0; i < (int)contours.size(); i++)
            areas[i] = contourArea(Mat(contours[i]));
        double max;
        Point maxPosition;
        cv::minMaxLoc(Mat(areas),0,&max,0,&maxPosition);
        drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
    }
    else
    {   // Case for using minimum pixel size
        for (int i = 0; i < (int)contours.size(); i++)
        {
            if (contourArea(Mat(contours[i]))>minPixelSize)
                drawContours(mask, contours, i, Scalar(1), CV_FILLED);
        }
    }
    // normalize so imwrite(...)/imshow(...) shows the mask correctly!
    cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

    Mat returnMask;
    returnMask=mask(tempRect);

    // show the images
    if (displayFaces)   imshow("Canny: Img in", img0);
    if (displayFaces)   imshow("Canny: Mask", returnMask);
    if (displayFaces)   imshow("Canny: Output", img1);

    return returnMask;
}
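Per the comment at the top of the function, the minPixelSize switch gives three behaviours. A hedged call-site sketch; the visionUtils instance, the input image, and the 5000-pixel threshold are illustrative:

visionUtils utils; //hypothetical instance
Mat gray = imread("input.png", IMREAD_GRAYSCALE); //hypothetical input
Mat largest  = utils.cannySegmentation(gray, -1, false);   //largest region only
Mat filtered = utils.cannySegmentation(gray, 5000, false); //drop regions smaller than 5000 pixels
Mat all      = utils.cannySegmentation(gray, 0, false);    //every detected segment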
Example 10
int App_main( int argc, char** argv )
{
    int numImg=50;

#if !defined WIN32 && !defined _WIN32 && !defined WINCE && defined __linux__ && !defined ANDROID
    pthread_setname_np(pthread_self(),"App_main");
#endif

    char filename[500];
    Mat image, cameraMatrix, R, T;
    vector<Mat> images,Rs,Ts;
    
    Mat ret;//a place to return downloaded images to

    
    double reconstructionScale=5/5.;

    for(int i=0;i<numImg;i++){
        Mat tmp;
        sprintf(filename,"../../Trajectory_30_seconds/scene_%03d.png",i);
        convertAhandaPovRayToStandard("../../Trajectory_30_seconds",
                                      i,
                                      cameraMatrix,
                                      R,
                                      T);
        Mat image;
        cout<<"Opening: "<< filename << endl;
        
        imread(filename, -1).convertTo(image,CV_32FC3,1.0/65535.0);
        resize(image,image,Size(),reconstructionScale,reconstructionScale);
        
        images.push_back(image.clone());
        Rs.push_back(R.clone());
        Ts.push_back(T.clone());

    }
    cv::cuda::CudaMem cret(images[0].rows,images[0].cols,CV_32FC1);
    ret=cret.createMatHeader();
    //Setup camera matrix
    double sx=reconstructionScale;
    double sy=reconstructionScale;
    cameraMatrix+=(Mat)(Mat_<double>(3,3) <<    0.0,0.0,0.5,
                                                0.0,0.0,0.5,
                                                0.0,0.0,0.0);
    cameraMatrix=cameraMatrix.mul((Mat)(Mat_<double>(3,3) <<    sx,0.0,sx,
                                                                0.0,sy ,sy,
                                                                0.0,0.0,1.0));
    cameraMatrix-=(Mat)(Mat_<double>(3,3) <<    0.0,0.0,0.5,
                                                0.0,0.0,0.5,
                                                0.0,0.0,0);
    int layers=32;
    int imagesPerCV=2;
    CostVolume cv(images[0],(FrameID)0,layers,0.010,0.0,Rs[0],Ts[0],cameraMatrix);
    
    

    cv::cuda::Stream s;
    for (int imageNum=0;imageNum<numImg;imageNum++){
        T=Ts[imageNum];
        R=Rs[imageNum];
        image=images[imageNum];
          
        if(cv.count<imagesPerCV){
            cv.updateCost(image, R, T);
        }
        else{
            //Attach optimizer
            Ptr<DepthmapDenoiseWeightedHuber> dp = createDepthmapDenoiseWeightedHuber(cv.baseImageGray,cv.cvStream);
            DepthmapDenoiseWeightedHuber& denoiser=*dp;
            Optimizer optimizer(cv);
            optimizer.initOptimization();
            GpuMat a(cv.loInd.size(),cv.loInd.type());
            cv.loInd.copyTo(a,cv.cvStream);
            GpuMat d;
            denoiser.cacheGValues();
            ret=image*0;
            pfShow("A function", ret, 0, cv::Vec2d(0, layers));
            pfShow("D function", ret, 0, cv::Vec2d(0, layers));
            pfShow("A function loose", ret, 0, cv::Vec2d(0, layers));
            pfShow("Predicted Image",ret,0,Vec2d(0,1));
            pfShow("Actual Image",ret);
            pfShow("A", ret, 0, cv::Vec2d(0, layers));
//                waitKey(0);
//                gpause();
            gpause();
            bool doneOptimizing; int Acount=0; int QDcount=0;
            do{
//                 cout<<"Theta: "<< optimizer.getTheta()<<endl;
//
                if(Acount==0)
                    gpause();
                a.download(ret);
                pfShow("A function", ret, 0, cv::Vec2d(0, layers));

//                 optimizer.epsilon*=optimizer.thetaStep;

                for (int i = 0; i < 10; i++) {
                    d=denoiser(a,optimizer.epsilon,optimizer.getTheta());
                    QDcount++;

                    d.download(ret);
                    pfShow("D function", ret, 0, cv::Vec2d(0, layers));
                }
                doneOptimizing=optimizer.optimizeA(d,a);
                Acount++;
            }while(!doneOptimizing);
            optimizer.lambda=.01;
            optimizer.optimizeA(d,a);
            optimizer.cvStream.waitForCompletion();
            a.download(ret);
            pfShow("A function loose", ret, 0, cv::Vec2d(0, layers));
            gpause();
//             cout<<"A iterations: "<< Acount<< "  QD iterations: "<<QDcount<<endl;
//             pfShow("Depth Solution", optimizer.depthMap(), 0, cv::Vec2d(cv.far, cv.near));
            imageNum=0;
            cv=CostVolume(images[imageNum],(FrameID)0,layers,0.010,0.0,Rs[imageNum],Ts[imageNum],cameraMatrix);
            s=optimizer.cvStream;
            for (int imageNum=0;imageNum<numImg;imageNum=(imageNum+1)%numImg){ //NOTE: the modulo update makes this loop cycle forever, replaying the trajectory
                reprojectCloud(images[imageNum],images[0],optimizer.depthMap(),RTToP(Rs[0],Ts[0]),RTToP(Rs[imageNum],Ts[imageNum]),cameraMatrix);
            }
        }
        
    }
    s.waitForCompletion();
    Stream::Null().waitForCompletion();
    return 0;
}
Example 11
Mat visionUtils::skinDetect(Mat captureframe, Mat3b *skinDetectHSV, Mat *skinMask, std::vector<int> adaptiveHSV, int minPixelSize, int imgBlurPixels, int imgMorphPixels, int singleRegionChoice, bool displayFaces)
{

    if (adaptiveHSV.size()!=6) //fall back to the default thresholds when none are supplied
    {
        adaptiveHSV.clear();
        adaptiveHSV.push_back(5);
        adaptiveHSV.push_back(38);
        adaptiveHSV.push_back(51);
        adaptiveHSV.push_back(17);
        adaptiveHSV.push_back(250);
        adaptiveHSV.push_back(242);
    }


    //int step = 0;
    Mat3b frameTemp;
    Mat3b frame;
    // Force resize to 640x480: all thresholds / pixel filters are configured for this size.
    // The image is resized back to its original size before returning.
    Size s = captureframe.size();
    cv::resize(captureframe,captureframe,Size(640,480));



    if (useGPU)
    {
        GpuMat imgGPU, imgGPUHSV;
        imgGPU.upload(captureframe);
        cv::cvtColor(imgGPU, imgGPUHSV, CV_BGR2HSV);
        GaussianBlur(imgGPUHSV, imgGPUHSV, Size(imgBlurPixels,imgBlurPixels), 1, 1);
        imgGPUHSV.download(frameTemp);
    }
    else
    {
        cv::cvtColor(captureframe, frameTemp, CV_BGR2HSV);
        GaussianBlur(frameTemp, frameTemp, Size(imgBlurPixels,imgBlurPixels), 1, 1);
    }

    // Potential FASTER VERSION using inRange
    Mat frameThreshold = Mat::zeros(frameTemp.rows,frameTemp.cols, CV_8UC1);
    Mat hsvMin = (Mat_<int>(1,3) << adaptiveHSV[0], adaptiveHSV[1],adaptiveHSV[2] );
    Mat hsvMax = (Mat_<int>(1,3) << adaptiveHSV[3], adaptiveHSV[4],adaptiveHSV[5] );
    inRange(frameTemp,hsvMin ,hsvMax, frameThreshold);
    frameTemp.copyTo(frame,frameThreshold);

    /* BGR CONVERSION AND THRESHOLD */
    Mat1b frame_gray;

    // send HSV to skinDetectHSV for return
    *skinDetectHSV=frame.clone();

    cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);


    // Adaptive thresholding technique
    // 1. Threshold data to find main areas of skin
    adaptiveThreshold(frame_gray,frame_gray,255,ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY_INV,9,1);


    if (useGPU)
    {
        GpuMat imgGPU;
        imgGPU.upload(frame_gray);
        // 2. Fill in thresholded areas
#if CV_MAJOR_VERSION == 2
        gpu::morphologyEx(imgGPU, imgGPU, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
        gpu::GaussianBlur(imgGPU, imgGPU, Size(imgBlurPixels,imgBlurPixels), 1, 1);
#elif CV_MAJOR_VERSION == 3
        //TODO: Check if that's correct
        Mat element = getStructuringElement(MORPH_RECT, Size(imgMorphPixels, imgMorphPixels), Point(-1, -1));
        Ptr<cuda::Filter> closeFilter = cuda::createMorphologyFilter(MORPH_CLOSE, imgGPU.type(), element, Point(-1, -1), 2);
        closeFilter->apply(imgGPU, imgGPU);
        cv::Ptr<cv::cuda::Filter> gaussianFilter = cv::cuda::createGaussianFilter(imgGPU.type(), imgGPU.type(), Size(imgMorphPixels, imgMorphPixels), 1, 1);
        gaussianFilter->apply(imgGPU, imgGPU);
#endif

        imgGPU.download(frame_gray);
    }
    else
    {
        // 2. Fill in thresholded areas
        morphologyEx(frame_gray, frame_gray, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
        GaussianBlur(frame_gray, frame_gray, Size(imgBlurPixels,imgBlurPixels), 1, 1);
    }


    // Select the single largest region from the image if singleRegionChoice is set (1)
    if (singleRegionChoice)
    {
        *skinMask = cannySegmentation(frame_gray, -1, displayFaces);
    }
    else // Detect each separate block and remove blobs smaller than a few pixels
    {
        *skinMask = cannySegmentation(frame_gray, minPixelSize, displayFaces);
    }

    // Just return skin
    Mat frame_skin;
    captureframe.copyTo(frame_skin,*skinMask);  // Copy captureframe data to frame_skin, using the skin mask
    // Resize image to original before return
    cv::resize(frame_skin,frame_skin,s);

    if (displayFaces)
    {
        imshow("Skin HSV (B)",frame);
        imshow("Adaptive_threshold (D1)",frame_gray);
        imshow("Skin segmented",frame_skin);
    }

    return frame_skin;
}
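A hedged call-site sketch for skinDetect; the utils instance and frame are assumed, the numeric arguments are illustrative, and an empty adaptiveHSV vector falls back to the built-in defaults (5, 38, 51, 17, 250, 242):

Mat3b skinHSV;
Mat skinMask;
std::vector<int> hsv; //left empty: the defaults above are used
Mat skin = utils.skinDetect(frame, &skinHSV, &skinMask, hsv,
                            5000, 7, 9, 1, false); //minPixelSize, blur, morph, single region, no display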
Example 12
double CKinFuTracker::directRotation(const CKeyFrame::tp_ptr pRefeFrame_, const CKeyFrame::tp_ptr pLiveFrame_, SO3Group<double>* pR_rl_)
{
	Intr sCamIntr_ = pRefeFrame_->_pRGBCamera->getIntrinsics(2);
	Matrix3d K = Matrix3d::Identity();
	//build the camera matrix K from the camera intrinsics
	K(0, 0) = sCamIntr_.fx;
	K(1, 1) = sCamIntr_.fy;
	K(0, 2) = sCamIntr_.cx;
	K(1, 2) = sCamIntr_.cy;
	SO3Group<double> CurR_rl_ = *pR_rl_;
	SO3Group<double> PrevR_rl_ = *pR_rl_;
	SO3Group<double> MinR_rl_ = *pR_rl_;
	Matrix3d R_rl_Kinv = PrevR_rl_.matrix() *K.inverse();
	Matrix3d H_rl = K * R_rl_Kinv;

	//get R,T of the previous estimate
	Matrix3d H_rl_t = H_rl.transpose();
	Matrix3d R_rl_Kinv_t = R_rl_Kinv.transpose();
	const Matd33&  devH_rl = pcl::device::device_cast<pcl::device::Matd33> (H_rl_t);
	const Matd33&  devR_rl_Kinv = pcl::device::device_cast<pcl::device::Matd33> (R_rl_Kinv_t);
	double dMinEnergy = numeric_limits<double>::max();
	double dPrevEnergy = numeric_limits<double>::max();
	dPrevEnergy = energy_direct_radiance_rotation(sCamIntr_, devR_rl_Kinv, devH_rl, _n_rad_origin_2_ref, _n_rad_live[2], _err_live[2]);
	dMinEnergy = dPrevEnergy;
	//cout << setprecision(15) << dMinEnergy << endl;
	for (short sIter = 0; sIter < 5; ++sIter) {
		//get R and T
		GpuMat gSumBuf = btl::device::direct_rotation(sCamIntr_, devR_rl_Kinv, devH_rl, _n_rad_origin_2_ref, _n_rad_live[2], _err_live[2]);
		Mat Buf; gSumBuf.download(Buf);
		SO3Group<double> R_rl = btl::utility::extractRFromBuffer<double>((double*)Buf.data);
		//cout << Tran_nc.matrix() << endl;
		CurR_rl_ = R_rl *PrevR_rl_;
		R_rl_Kinv = CurR_rl_.matrix()*K.inverse();
		H_rl = K * R_rl_Kinv;

		H_rl_t = H_rl.transpose();
		R_rl_Kinv_t = R_rl_Kinv.transpose();
		double dCurEnergy = energy_direct_radiance_rotation(sCamIntr_, devR_rl_Kinv, devH_rl, _n_rad_origin_2_ref, _n_rad_live[2], _err_live[2]);
		//cout << sIter << ": " << dPrevEnergy << " " << dCurEnergy << endl;
		if (dCurEnergy < dMinEnergy){
			dMinEnergy = dCurEnergy;
			MinR_rl_ = CurR_rl_;
		}
		if (dMinEnergy / dCurEnergy < 0.25){ //diverges
			//cout << "Diverge Warning:" << endl;
			dCurEnergy = dMinEnergy;
			CurR_rl_ = MinR_rl_;
			break;
		}
		PrevR_rl_ = CurR_rl_;
		if (fabs(dPrevEnergy / dCurEnergy - 1) < 0.01f){ //converges
			//cout << "Converges" << endl;
			dCurEnergy = dMinEnergy;
			CurR_rl_ = MinR_rl_;
			break;
		}
		dPrevEnergy = dCurEnergy;
	}
	*pR_rl_ = CurR_rl_;
	return dMinEnergy;
}
Example 13
double CKinFuTracker::dvoICPIC(const CKeyFrame::tp_ptr pRefeFrame_, CKeyFrame::tp_ptr pLiveFrame_, const short asICPIterations_[], SE3Group<double>* pT_rl_, Eigen::Vector4i* pActualIter_) const
{
	SE3Group<double> PrevT_rl = *pT_rl_;
	SE3Group<double> NewT_rl = *pT_rl_;
	//get R,T of the previous estimate
	Matrix3d R_rl_t_tmp = PrevT_rl.so3().inverse().matrix();
	const Matd33&  devR_rl = pcl::device::device_cast<pcl::device::Matd33> (R_rl_t_tmp); //implicit inverse

	Vector3d t_rl = PrevT_rl.translation();
	const double3& devT_rl = pcl::device::device_cast<double3> (t_rl);

	//from low resolution to high
	double dCurEnergy = numeric_limits<double>::max();
	for (short sPyrLevel = pLiveFrame_->pyrHeight() - 1; sPyrLevel >= 0; sPyrLevel--){
		// for each pyramid level we have a min energy and corresponding best R t
		if (asICPIterations_[sPyrLevel] > 0){
			dCurEnergy = btl::device::dvo_icp_energy(pLiveFrame_->_pRGBCamera->getIntrinsics(sPyrLevel),
				devR_rl, devT_rl,
				*pRefeFrame_->_agPyrPts[sPyrLevel], *pRefeFrame_->_agPyrNls[sPyrLevel], _n_rad_ref[sPyrLevel],
				*pLiveFrame_->_agPyrPts[sPyrLevel], *pLiveFrame_->_agPyrNls[sPyrLevel], _n_rad_live[sPyrLevel],
				*pLiveFrame_->_agPyrDepths[sPyrLevel], _err_live[sPyrLevel], *pLiveFrame_->_pry_mask[sPyrLevel]);
			//PRINT(dMinEnergy);
		}

		SE3Group<double> MinT_rl = NewT_rl;
		double dMin = dCurEnergy;
		double dPrevEnergy = dCurEnergy;
		for (short sIter = 0; sIter < asICPIterations_[sPyrLevel]; ++sIter) {
			//get R and T
			GpuMat cvgmSumBuf = btl::device::dvo_icp(pLiveFrame_->_pRGBCamera->getIntrinsics(sPyrLevel),
				devR_rl, devT_rl,
				*pRefeFrame_->_agPyrPts[sPyrLevel], *pRefeFrame_->_agPyrNls[sPyrLevel], _n_rad_ref[sPyrLevel],
				*pLiveFrame_->_agPyrPts[sPyrLevel], *pLiveFrame_->_agPyrNls[sPyrLevel], _n_rad_live[sPyrLevel],
				*pLiveFrame_->_agPyrDepths[sPyrLevel], _err_live[sPyrLevel], *pLiveFrame_->_pry_mask[sPyrLevel]);
			Mat Buf; cvgmSumBuf.download(Buf);
			SE3Group<double> Tran_nc = btl::utility::extractRTFromBuffer<double>((double*)Buf.data);
			NewT_rl = Tran_nc * PrevT_rl;
			R_rl_t_tmp = NewT_rl.so3().inverse().matrix();
			t_rl = NewT_rl.translation();
			dCurEnergy = btl::device::dvo_icp_energy(pLiveFrame_->_pRGBCamera->getIntrinsics(sPyrLevel),
				devR_rl, devT_rl,
				*pRefeFrame_->_agPyrPts[sPyrLevel], *pRefeFrame_->_agPyrNls[sPyrLevel], _n_rad_ref[sPyrLevel],
				*pLiveFrame_->_agPyrPts[sPyrLevel], *pLiveFrame_->_agPyrNls[sPyrLevel], _n_rad_live[sPyrLevel],
				*pLiveFrame_->_agPyrDepths[sPyrLevel], _err_live[sPyrLevel], *pLiveFrame_->_pry_mask[sPyrLevel]);
			//cout << sIter << ": " << dPrevEnergy << " " << dCurEnergy << endl;
			if (dCurEnergy < dMin){
				dMin = dCurEnergy;
				MinT_rl = NewT_rl;
			}
			if (dMin / dCurEnergy > 1.125){ //diverges
				//cout << "Diverge Warning:" << endl;
				//cout <<"New "<< NewT_rl.matrix() << endl;
				//cout <<"Prev" <<PrevT_rl.matrix() << endl;
				NewT_rl = MinT_rl;
				dCurEnergy = dMin;
				break;
			}
			PrevT_rl = NewT_rl;
			if (fabs(dPrevEnergy / dCurEnergy - 1) < 1e-6f){ //converges
				//cout << "Converges" << endl;
				dCurEnergy = dMin;
				NewT_rl = MinT_rl;
				break;
			}
			dPrevEnergy = dCurEnergy;
		}//for each iteration
	}//for pyrlevel
	*pT_rl_ = NewT_rl;
	SE3Group<double> T_rw(pRefeFrame_->_R_cw, pRefeFrame_->_Tw);
	T_rw = NewT_rl.inverse()*T_rw;
	pLiveFrame_->_R_cw = T_rw.so3();
	pLiveFrame_->_Tw = T_rw.translation();

	return dCurEnergy;
}
Example 14
inline
void Stream::enqueueDownload(const GpuMat& src, OutputArray dst)
{
    src.download(dst, *this);
}
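enqueueDownload forwards to the stream-aware download() overload, so the copy is asynchronous: the destination must stay valid and the stream must be synchronized before the data is read. A minimal sketch:

cv::cuda::Stream stream;
cv::cuda::GpuMat d_img(480, 640, CV_8UC1, cv::Scalar(0));
cv::Mat host;
stream.enqueueDownload(d_img, host); //asynchronous device-to-host copy
stream.waitForCompletion();          //block until the copy has finished

For a copy that genuinely overlaps with kernel execution, the destination should be page-locked host memory (cv::cuda::HostMem).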
Example 15
void showImage(GpuMat& img) {
	cv::Mat m;
	img.download(m);
	showImage(m);
}