Example #1
#include <cstdio>      // printf
#include <iostream>    // std::cout
#include <list>
#include <vector>

#include <opencv2/opencv.hpp>  // cv::Mat, highgui, imgproc
#include <libfreenect.hpp>     // Freenect::Freenect
// CvKinect is a user-defined wrapper around Freenect::FreenectDevice (see the sketch after this listing).

int main(void) {
	printf("Hello World!\n");
	
	cv::Mat depthMat(cv::Size(640,480), CV_16UC1);
	cv::Mat depthf(cv::Size(640,480), CV_8UC1);
	cv::Mat rgbMat(cv::Size(640,480), CV_8UC3, cv::Scalar(0));
	
	std::list<cv::Mat> images;
	
	Freenect::Freenect freenect;
	CvKinect& device = freenect.createDevice<CvKinect>(0);
	
	device.startVideo();
	
	cv::namedWindow("rgb", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("depth", CV_WINDOW_AUTOSIZE);
	
	while (true) {
		device.getVideo(rgbMat);
		
		// if (images.size() > 0) {
		// 	cv::imshow("rgb", images.front());
		// } else {
			cv::imshow("rgb", rgbMat);
		// }
		
		char key = cv::waitKey(5);
		
		if (key == 'x') {
			break;
		} else if (key == 's') {
			images.push_back(rgbMat.clone());
		}
	}
	
	device.stopVideo();
	
	std::cout << "Images: " << images.size() << std::endl;
	
	for (std::list<cv::Mat>::iterator iter = images.begin(); iter != images.end(); ++iter) {
		cv::imshow("rgb", *iter);
	
		cv::Mat greyMat(cv::Size(640, 480), CV_8UC1, cv::Scalar(0));
	
		cv::cvtColor(*iter, greyMat, CV_RGB2GRAY);
		std::vector<cv::Point2f> corners;
		
		bool found = cv::findChessboardCorners(greyMat, cv::Size(7, 7), corners);
		
		std::cout << "Corners found: " << corners.size()
		          << (found ? " (complete board)" : "") << std::endl;
	}
	
	cv::destroyWindow("depth");
	cv::destroyWindow("rgb");
	
	std::cout << "Exit" << std::endl;
	
	return 0;
}
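Example #1 drives a `CvKinect` device class that the listing never defines; it is not part of libfreenect's C++ wrapper. A minimal sketch of what such a wrapper might look like, assuming C++11 `<mutex>` and the stock `Freenect::FreenectDevice` base class (treat the implementation details as guesses, not the original code):

// Hypothetical CvKinect wrapper assumed by Example #1. libfreenect invokes
// VideoCallback() from its own thread, so the frame buffer is mutex-protected.
class CvKinect : public Freenect::FreenectDevice {
public:
	CvKinect(freenect_context* ctx, int index)
		: Freenect::FreenectDevice(ctx, index),
		  m_buffer(cv::Size(640, 480), CV_8UC3), m_newFrame(false) {}

	// Called by the libfreenect thread; copies the RGB frame under the lock.
	void VideoCallback(void* rgb, uint32_t timestamp) {
		cv::Mat frame(cv::Size(640, 480), CV_8UC3, rgb);
		std::lock_guard<std::mutex> lock(m_mutex);
		frame.copyTo(m_buffer);
		m_newFrame = true;
	}

	// Called from the main thread; returns true if a fresh frame was available.
	bool getVideo(cv::Mat& output) {
		std::lock_guard<std::mutex> lock(m_mutex);
		m_buffer.copyTo(output);
		bool fresh = m_newFrame;
		m_newFrame = false;
		return fresh;
	}

private:
	std::mutex m_mutex;
	cv::Mat m_buffer;
	bool m_newFrame;
};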
Example #2
// Do not call directly, even from a subclass; libfreenect invokes this from its own thread.
void KinectInputDevice::DepthCallback(void* _depth, uint32_t timestamp) {
    //std::cout << "Depth callback" << std::endl;
    // Wrap the libfreenect buffer without copying, then clone under the lock
    // so the stored frame stays valid after the callback returns.
    cv::Mat depthMat(cv::Size(640,480), CV_16UC1, _depth);
    pthread_mutex_lock(&m_mutex);
    m_depth = depthMat.clone();
    m_new_depth = true;
    pthread_mutex_unlock(&m_mutex);
}
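The callback only stores the frame; a consumer on another thread would read it back under the same mutex. A hedged sketch of such a getter (the method name GetLatestDepth is hypothetical; the members match the callback above):

// Hypothetical companion getter for KinectInputDevice: hands out the most
// recent depth frame and clears the new-frame flag under the mutex.
bool KinectInputDevice::GetLatestDepth(cv::Mat& out) {
    pthread_mutex_lock(&m_mutex);
    bool fresh = m_new_depth;
    if (fresh) {
        m_depth.copyTo(out);
        m_new_depth = false;
    }
    pthread_mutex_unlock(&m_mutex);
    return fresh;
}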
Example #3
cv::Mat ZBufferView::ReadBufferToMatrix()
{
	// glReadBuffer() selects a color read buffer (GL_FRONT, GL_BACK,
	// GL_COLOR_ATTACHMENT0, ...), so m_fboHandle must hold such an enum,
	// not a framebuffer object name; depth reads ignore this setting.
	glReadBuffer(m_fboHandle);
	glReadPixels(0, 0, m_width, m_height, GL_DEPTH_COMPONENT, GL_FLOAT, m_depthValues);
	// make an OpenCV image
	cv::Mat depthMat(m_height, m_width, CV_32FC1, m_depthValues);
	cv::flip(depthMat, m_depthMat, 0);
	//cv::cvtColor(m_RGBMat, m_BGRMat, CV_RGB2BGR);

	// scale values from 0..1 to 0..255
	m_depthMat.convertTo(m_grayMat, CV_8UC1, 255, 0);

#ifdef _DEBUG
	// check minimal and maximal value
	double min, max;
	cv::minMaxLoc(m_depthMat, &min, &max);
#endif
	return m_grayMat;
}
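For reading depth out of an FBO, the usual setup looks more like the sketch below (m_fbo here is an assumed framebuffer object name, not from the original code):

// Bind the FBO for reading; GL_DEPTH_COMPONENT reads do not go through
// glReadBuffer(), which selects color buffers only.
glBindFramebuffer(GL_READ_FRAMEBUFFER, m_fbo);
glReadPixels(0, 0, m_width, m_height, GL_DEPTH_COMPONENT, GL_FLOAT, m_depthValues);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);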
Example #4
int main(int argc, char **argv) {
	bool die(false);
	string filename("snapshot");
	string suffix(".png");
	int i_snap(0),iter(0);
	
	Mat depthMat(Size(640,480),CV_16UC1);
	Mat depthf  (Size(640,480),CV_8UC1);
	Mat rgbMat(Size(640,480),CV_8UC3,Scalar(0));
	Mat ownMat(Size(640,480),CV_8UC3,Scalar(0));
	
	Freenect::Freenect<MyFreenectDevice> freenect;
	MyFreenectDevice& device = freenect.createDevice(0);
	
	device.setTiltDegrees(10.0);
	
	bool registered  = false;
	Mat blobMaskOutput = Mat::zeros(Size(640,480),CV_8UC1),outC;
	Point midBlob;
	
	int startX = 200, sizeX = 180, num_x_reps = 18, num_y_reps = 48;
	double	height_over_num_y_reps = 480.0/num_y_reps,			//floating-point division; int/int would truncate
			width_over_num_x_reps = (double)sizeX/num_x_reps;
	
	
	vector<double> _d(num_x_reps * num_y_reps); //the descriptor
	Mat descriptorMat(_d);

//	CvNormalBayesClassifier classifier;	//doesn't work
	CvKNearest classifier;
//	CvSVM classifier;	//doesn't work
//	CvBoost classifier;	//only good for 2 classes
//	CvDTree classifier;
	
	
	vector<vector<double> > training_data;
	vector<int>				label_data;
	PCA pca;
	Mat labelMat, dataMat; 
	vector<float> label_counts(4);
	
	bool trained = false, loaded = false;
 	
	device.startVideo();
	device.startDepth();
    while (!die) {
    	device.getVideo(rgbMat);
    	device.getDepth(depthMat);
//        cv::imshow("rgb", rgbMat);
    	depthMat.convertTo(depthf, CV_8UC1, 255.0/2048.0);
				
        cv::imshow("depth",depthf);
		
		//interpolation & inpainting
		{
			Mat _tmp,_tmp1;
			Mat(depthMat - 400.0).convertTo(_tmp1,CV_64FC1);   //minimum observed value is ~440, so shift a bit
			_tmp1.setTo(Scalar(2048), depthMat > 750.0);   //cut off at 750 to create a "box" where the user interacts (note: setTo on _tmp1; _tmp is still empty here)
//			_tmp.convertTo(depthf, CV_8UC1, 255.0/1648.0);  //values are 0-2048 (11bit), account for -400 = 1648

			//quadratic interpolation
//			cv::pow(_tmp,2.0,_tmp1);
//			_tmp1 = _tmp1 * 4.0;
			
//			try {
//				cv:log(_tmp,_tmp1);
//			}
//			catch (cv::Exception e) {
//				cerr << e.what() << endl;
//				exit(0);
//			}
			
			Point minLoc; double minval,maxval;
			minMaxLoc(_tmp1, &minval, &maxval, NULL, NULL);
			_tmp1.convertTo(depthf, CV_8UC1, 255.0/maxval);
			
			Mat small_depthf; resize(depthf,small_depthf,Size(),0.2,0.2);
			cv::inpaint(small_depthf,(small_depthf == 255),_tmp1,5.0,INPAINT_TELEA);
			
			resize(_tmp1, _tmp, depthf.size());
			_tmp.copyTo(depthf, (depthf == 255));
		}

		{
//			Mat smallDepth = depthf; //cv::resize(depthf,smallDepth,Size(),0.5,0.5);
//			Mat edges; //Laplacian(smallDepth, edges, -1, 7, 1.0);
//			Sobel(smallDepth, edges, -1, 1, 1, 7);
			//medianBlur(edges, edges, 11);
//			for (int x=0; x < edges.cols; x+=20) {
//				for (int y=0; y < edges.rows; y+=20) {
//					//int nz = countNonZero(edges(Range(y,MIN(y+20,edges.rows-1)),Range(x,MIN(x+20,edges.cols-1))));
//					Mat _i = edges(Range(y,MIN(y+20,edges.rows-1)),Range(x,MIN(x+20,edges.cols-1)));
//					medianBlur(_i, _i, 7);
//					//rectangle(edges, Point(x,y), Point(x+20,y+20), Scalar(nz), CV_FILLED);
//				}
//			}
		
//			imshow("edges", edges);
		}
				
		cvtColor(depthf, outC, CV_GRAY2BGR);
		
		Mat blobMaskInput = depthf < 120; //anything not white is "real" depth, TODO: inpainting invalid data
		vector<Point> ctr,ctr2;

		//closest point to the camera
		Point minLoc; double minval,maxval;
		minMaxLoc(depthf, &minval, &maxval, &minLoc, NULL, blobMaskInput);
		circle(outC, minLoc, 5, Scalar(0,255,0), 3);
		
		blobMaskInput = depthf < (minval + 18);
		
		Scalar blb = refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob); //find contours in the foreground, choose biggest
//		if (blobMaskOutput.data != NULL) {
//			imshow("first", blobMaskOutput);
//		}
		/////// blb :
		//blb[0] = x, blb[1] = y, blb[2] = 1st blob size, blb[3] = 2nd blob size.
		

		
		if(blb[0]>=0 && blb[2] > 500) { //1st blob detected, and is big enough
			//cvtColor(depthf, outC, CV_GRAY2BGR);
			
			Scalar mn,stdv;
			meanStdDev(depthf,mn,stdv,blobMaskInput);
			
			//cout << "min: " << minval << ", max: " << maxval << ", mean: " << mn[0] << endl;
			
			//now refining blob by looking at the mean depth value it has...
			blobMaskInput = depthf < (mn[0] + stdv[0]);
			
			//(very simple) bias with hand color
			{
				Mat hsv; cvtColor(rgbMat, hsv, CV_RGB2HSV);
				Mat _col_p(hsv.size(),CV_32FC1);
				int jump = 5;
				for (int x=0; x < hsv.cols; x+=jump) {
					for (int y=0; y < hsv.rows; y+=jump) {
						Mat _i = hsv(Range(y,MIN(y+jump,hsv.rows-1)),Range(x,MIN(x+jump,hsv.cols-1)));
						Scalar hsv_mean = mean(_i);
						Vec2i u; u[0] = hsv_mean[0]; u[1] = hsv_mean[1];
						Vec2i v; v[0] = 120; v[1] = 110;
						rectangle(_col_p, Point(x,y), Point(x+jump,y+jump), Scalar(1.0-MIN(norm(u-v)/125.0,1.0)), CV_FILLED);
					}
				}
				//			hsv = hsv - Scalar(0,0,255);
				Mat _t = (Mat_<double>(2,3) << 1, 0, 15,    0, 1, -20);
				Mat col_p(_col_p.size(),CV_32FC1);
				warpAffine(_col_p, col_p, _t, col_p.size());
				GaussianBlur(col_p, col_p, Size(11.0,11.0), 2.5);
				imshow("hand color",col_p);
				//			imshow("rgb",rgbMat);
				Mat blobMaskInput_32FC1; blobMaskInput.convertTo(blobMaskInput_32FC1, CV_32FC1, 1.0/255.0);
				blobMaskInput_32FC1 = blobMaskInput_32FC1.mul(col_p, 1.0);
				blobMaskInput_32FC1.convertTo(blobMaskInput, CV_8UC1, 255.0);
				
				blobMaskInput = blobMaskInput > 128;
				
				imshow("blob bias", blobMaskInput);
			}
			
			
			blb = refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob);
			
			imshow("blob", blobMaskOutput);
			
			if(blb[0] >= 0 && blb[2] > 300) {
				//draw contour
				Scalar color(0,0,255);
				for (int idx=0; idx<ctr.size()-1; idx++)
					line(outC, ctr[idx], ctr[idx+1], color, 1);
				line(outC, ctr[ctr.size()-1], ctr[0], color, 1);
				
				if(ctr2.size() > 0) {	//second blob detected
					Scalar color2(255,0,255);
					for (int idx=0; idx<ctr2.size()-1; idx++)
						line(outC, ctr2[idx], ctr2[idx+1], color2, 2);
					line(outC, ctr2[ctr2.size()-1], ctr2[0], color2, 2);
				}
								
				//blob center
				circle(outC, Point(blb[0],blb[1]), 50, Scalar(255,0,0), 3);
				
				{
					Mat hsv; cvtColor(rgbMat, hsv, CV_RGB2HSV);
					Scalar hsv_mean,hsv_stddev; meanStdDev(hsv, hsv_mean, hsv_stddev, blobMaskOutput);
					stringstream ss; ss << hsv_mean[0] << "," << hsv_mean[1] << "," << hsv_mean[2];
					putText(outC, ss.str(), Point(blb[0],blb[1]), CV_FONT_HERSHEY_PLAIN, 1.0, Scalar(0,255,255));
				}
				
				
				Mat blobDepth,blobEdge; 
				depthf.copyTo(blobDepth,blobMaskOutput);
				Laplacian(blobDepth, blobEdge, 8);
//				equalizeHist(blobEdge, blobEdge);//just for visualization
				
				Mat logPolar(depthf.size(),CV_8UC1);
				cvLogPolar(&((IplImage)blobEdge), &((IplImage)logPolar), Point2f(blb[0],blb[1]), 80.0);
				
//				for (int i=0; i<num_x_reps+1; i++) {
//					//verical lines
//					line(logPolar, Point(startX+i*width_over_num_x_reps, 0), Point(startX+i*width_over_num_x_reps,479), Scalar(255), 1);
//				}
//				for(int i=0; i<num_y_reps+1; i++) {			
//					//horizontal
//					line(logPolar, Point(startX, i*height_over_num_y_reps), Point(startX+sizeX,i*height_over_num_y_reps), Scalar(255), 1);
//				}
				
				double total = 0.0;
				
				//histogram
				for (int i=0; i<num_x_reps; i++) {
					for(int j=0; j<num_y_reps; j++) {
						Mat part = logPolar(
										Range(j*height_over_num_y_reps,(j+1)*height_over_num_y_reps),
										 Range(startX+i*width_over_num_x_reps,startX+(i+1)*width_over_num_x_reps)
										 );
						
//						int count = countNonZero(part); //TODO: use calcHist
//						_d[i*num_y_reps + j] = count;

						Scalar mn = mean(part);						
//						part.setTo(Scalar(mn[0])); //for debug: show the value in the image
						_d[i*num_y_reps + j] = mn[0];	//j runs over num_y_reps, so stride by num_y_reps to avoid index collisions
						
						
						total += mn[0];
					}
				}
				
				descriptorMat /= total;	//in place, so the _d vector it wraps is actually normalized
				
				/*
				Mat images[1] = {logPolar(Range(0,30),Range(0,30))};
				int nimages = 1;
				int channels[1] = {0};
				int dims = 1;
				float range_0[]={0,256};
				float* ranges[] = { range_0 };
				int histSize[1] = { 5 };
				
				calcHist(, <#int nimages#>, <#const int *channels#>, <#const Mat mask#>, <#MatND hist#>, <#int dims#>, <#const int *histSize#>, <#const float **ranges#>, <#bool uniform#>, <#bool accumulate#>)
				*/
				
//				Mat _tmp(logPolar.size(),CV_8UC1);
//				cvLogPolar(&((IplImage)logPolar), &((IplImage)_tmp),Point2f(blb[0],blb[1]), 80.0, CV_WARP_INVERSE_MAP);
//				imshow("descriptor", _tmp);
//				imshow("logpolar", logPolar);
			}
		}
		
		if(trained) {
			Mat results(1,1,CV_32FC1);
			Mat samples; Mat(Mat(_d).t()).convertTo(samples,CV_32FC1);
			
			Mat samplesAfterPCA = samples; //pca.project(samples);
			
			classifier.find_nearest(&((CvMat)samplesAfterPCA), 1, &((CvMat)results));
//			((float*)results.data)[0] = classifier.predict(&((CvMat)samples))->value;
			
			Mat lc(label_counts); lc *= 0.9;
			
//			label_counts[(int)((float*)results.data)[0]] *= 0.9;
			label_counts[(int)((float*)results.data)[0]] += 0.1;
			Point maxLoc;
			minMaxLoc(lc, NULL, NULL, NULL, &maxLoc);
			int res = maxLoc.y;
			
			stringstream ss; ss << "prediction: ";
			if (res == LABEL_OPEN) {
				ss << "Open hand";
			}
			if (res == LABEL_FIST) {
				ss << "Fist";
			}
			if (res == LABEL_THUMB) {
				ss << "Thumb";
			}
			if (res == LABEL_GARBAGE) {
				ss << "Garbage";
			}
			putText(outC, ss.str(), Point(20,50), CV_FONT_HERSHEY_PLAIN, 3.0, Scalar(0,0,255), 2);
		}
		
		stringstream ss; ss << "samples: " << training_data.size();
		putText(outC, ss.str(), Point(30,outC.rows - 30), CV_FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 1);
		
		imshow("blobs", outC);
		
		char k = cvWaitKey(5);
		if( k == 27 ){
			break;
		}
		if( k == 8 ) {
			std::ostringstream file;
			file << filename << i_snap << suffix;
			cv::imwrite(file.str(),rgbMat);
			i_snap++;
		}
		if (k == 'g') {
			//put into training as 'garbage'
			training_data.push_back(_d);
			label_data.push_back(LABEL_GARBAGE);
			cout << "learn grabage" << endl;
		}
		if(k == 'o') {
				//put into training as 'open'
			training_data.push_back(_d);
			label_data.push_back(LABEL_OPEN);
			cout << "learn open" << endl;
		}
		if(k == 'f') {
			//put into training as 'fist'
			training_data.push_back(_d);
			label_data.push_back(LABEL_FIST);
			cout << "learn fist" << endl;
		}
		if(k == 'h') {
			//put into training as 'thumb'
			training_data.push_back(_d);
			label_data.push_back(LABEL_THUMB);
			cout << "learn thumb" << endl;
		}
		if (k=='t') {
			//train model
			cout << "train model" << endl;
			if(loaded != true) {
				dataMat = Mat(training_data.size(),_d.size(),CV_32FC1);	//descriptors as matrix rows
				for (uint i=0; i<training_data.size(); i++) {
					Mat v = dataMat(Range(i,i+1),Range::all());
					Mat(Mat(training_data[i]).t()).convertTo(v,CV_32FC1,1.0);
				}
				Mat(label_data).convertTo(labelMat,CV_32FC1);
			}
			
//			pca = pca(dataMat,Mat(),CV_PCA_DATA_AS_ROW,15);
			Mat dataAfterPCA = dataMat;
//			pca.project(dataMat,dataAfterPCA);
			
			classifier.train(&((CvMat)dataAfterPCA), &((CvMat)labelMat));
			
			trained = true;
		}
//		if(k=='p' && trained) {
//			//predict
//			Mat results(1,1,CV_32FC1);
//			Mat samples(1,64,CV_32FC1); Mat(Mat(_d).t()).convertTo(samples,CV_32FC1);
//			classifier.find_nearest(&((CvMat)samples), 1, &((CvMat)results));
//			cout << "prediction: " << ((float*)results.data)[0] << endl;
//		}
		if(k=='s') {
			cout << "save training data" << endl;
//			classifier.save("knn-classifier-open-fist-thumb.yaml"); //not implemented
			dataMat = Mat(training_data.size(),_d.size(),CV_32FC1);	//descriptors as matrix rows
			for (uint i=0; i<training_data.size(); i++) {
				Mat v = dataMat(Range(i,i+1),Range::all());
				Mat(Mat(training_data[i]).t()).convertTo(v,CV_32FC1,1.0);
			}
			Mat(label_data).convertTo(labelMat,CV_32FC1);

			FileStorage fs;
			fs.open("data-samples-labels.yaml", CV_STORAGE_WRITE);
			if (fs.isOpened()) {
				fs << "samples" << dataMat;
				fs << "labels" << labelMat;

				fs << "startX" << startX;
				fs << "sizeX" << sizeX;
				fs << "num_x_reps" << num_x_reps;
				fs << "num_y_reps" << num_y_reps;
				
				loaded = true;
				fs.release();
			} else {
				cerr << "can't open saved data" << endl;
			}
		}
		if(k=='l') {
			cout << "try to load training data" << endl;
			FileStorage fs;
			fs.open("data-samples-labels.yaml", CV_STORAGE_READ);
			if (fs.isOpened()) {
				fs["samples"] >> dataMat;
				fs["labels"] >> labelMat;
				fs["startX"] >> startX;
				fs["sizeX"] >> sizeX;
				fs["num_x_reps"] >> num_x_reps;
				fs["num_y_reps"] >> num_y_reps;
				height_over_num_y_reps = 480.0/num_y_reps;
				width_over_num_x_reps = (double)sizeX/num_x_reps;
				
				loaded = true;
				fs.release();			
			} else {
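The listing above is cut off inside the load branch, and it calls a refineSegments() helper that is not shown. A minimal sketch of such a helper, modeled on OpenCV's segment_objects demo with the signature and return convention inferred from the call sites (so treat it as an approximation, not the original):

// Hypothetical refineSegments(): cleans up the mask, keeps the largest blob,
// and returns Scalar(center x, center y, largest area, second-largest area),
// with x = -1 when nothing is found -- matching how blb is used above.
Scalar refineSegments(const Mat& /*img*/, Mat& mask, Mat& dst,
                      vector<Point>& contour, vector<Point>& secondContour,
                      Point& midBlob)
{
	int niters = 3;
	vector<vector<Point> > contours;
	Mat temp;
	dilate(mask, temp, Mat(), Point(-1,-1), niters);
	erode(temp, temp, Mat(), Point(-1,-1), niters * 2);
	dilate(temp, temp, Mat(), Point(-1,-1), niters);
	findContours(temp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	dst = Mat::zeros(mask.size(), CV_8UC1);
	if (contours.empty())
		return Scalar(-1, -1, 0, 0);

	//pick the two largest contours by area
	int largest = -1, second = -1;
	double maxArea = 0, secondArea = 0;
	for (int i = 0; i < (int)contours.size(); i++) {
		double area = fabs(contourArea(Mat(contours[i])));
		if (area > maxArea) {
			second = largest; secondArea = maxArea;
			largest = i; maxArea = area;
		} else if (area > secondArea) {
			second = i; secondArea = area;
		}
	}
	contour = contours[largest];
	if (second >= 0) secondContour = contours[second];
	else secondContour.clear();

	drawContours(dst, contours, largest, Scalar(255), CV_FILLED);
	Moments m = moments(Mat(contours[largest]));
	midBlob = Point((int)(m.m10 / m.m00), (int)(m.m01 / m.m00));
	return Scalar(midBlob.x, midBlob.y, maxArea, secondArea);
}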
Example #5
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource( &pDepthSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader( &pDepthReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pDepthSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424
	unsigned int bufferSize = width * height * sizeof( unsigned short );

	cv::Mat bufferMat( height, width, CV_16UC1 );
	cv::Mat depthMat( height, width, CV_8UC1 );
	cv::namedWindow( "Depth" );

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	// Create Reconstruction
	NUI_FUSION_RECONSTRUCTION_PARAMETERS reconstructionParameter;
	reconstructionParameter.voxelsPerMeter = 256;
	reconstructionParameter.voxelCountX = 512;
	reconstructionParameter.voxelCountY = 384;
	reconstructionParameter.voxelCountZ = 512;

	Matrix4 worldToCameraTransform;
	SetIdentityMatrix( worldToCameraTransform );
	INuiFusionReconstruction* pReconstruction;
	hResult = NuiFusionCreateReconstruction( &reconstructionParameter, NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, -1, &worldToCameraTransform, &pReconstruction );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateReconstruction()" << std::endl;
		return -1;
	}

	// Create Image Frame
	// Depth
	NUI_FUSION_IMAGE_FRAME* pDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// SmoothDepth
	NUI_FUSION_IMAGE_FRAME* pSmoothDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pSmoothDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// Point Cloud
	NUI_FUSION_IMAGE_FRAME* pPointCloudImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, width, height, nullptr, &pPointCloudImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( POINT_CLOUD )" << std::endl;
		return -1;
	}

	// Surface
	NUI_FUSION_IMAGE_FRAME* pSurfaceImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pSurfaceImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	// Normal
	NUI_FUSION_IMAGE_FRAME* pNormalImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pNormalImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	cv::namedWindow( "Surface" );
	cv::namedWindow( "Normal" );

	while( 1 ){
		// Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );

		if( SUCCEEDED( hResult ) ){
			hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
			if( SUCCEEDED( hResult ) ){
				bufferMat.convertTo( depthMat, CV_8U, -255.0f / 8000.0f, 255.0f );
				hResult = pReconstruction->DepthToDepthFloatFrame( reinterpret_cast<UINT16*>( bufferMat.data ), width * height * sizeof( UINT16 ), pDepthFloatImageFrame, NUI_FUSION_DEFAULT_MINIMUM_DEPTH/* 0.5[m] */, NUI_FUSION_DEFAULT_MAXIMUM_DEPTH/* 8.0[m] */, true );
				if( FAILED( hResult ) ){
					std::cerr << "Error :INuiFusionReconstruction::DepthToDepthFloatFrame()" << std::endl;
					return -1;
				}
			}
		}
		SafeRelease( pDepthFrame );

		// Smoothing Depth Image Frame
		hResult = pReconstruction->SmoothDepthFloatFrame( pDepthFloatImageFrame, pSmoothDepthFloatImageFrame, 1, 0.04f );
		if( FAILED( hResult ) ){
			std::cerr << "Error : INuiFusionReconstruction::SmoothDepthFloatFrame()" << std::endl;
			return -1;
		}

		// Reconstruction Process
		pReconstruction->GetCurrentWorldToCameraTransform( &worldToCameraTransform );
		hResult = pReconstruction->ProcessFrame( pSmoothDepthFloatImageFrame, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT, nullptr, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			static int errorCount = 0;
			errorCount++;
			if( errorCount >= 100 ) {
				errorCount = 0;
				ResetReconstruction( pReconstruction, &worldToCameraTransform );
			}
		}

		// Calculate Point Cloud
		hResult = pReconstruction->CalculatePointCloud( pPointCloudImageFrame, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			std::cerr << "Error : CalculatePointCloud" << std::endl;
			return -1;
		}

		// Shading Point Cloud
		Matrix4 worldToBGRTransform = { 0.0f };
		worldToBGRTransform.M11 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountX;
		worldToBGRTransform.M22 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountY;
		worldToBGRTransform.M33 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountZ;
		worldToBGRTransform.M41 = 0.5f;
		worldToBGRTransform.M42 = 0.5f;
		worldToBGRTransform.M43 = 0.0f;
		worldToBGRTransform.M44 = 1.0f;

		hResult = NuiFusionShadePointCloud( pPointCloudImageFrame, &worldToCameraTransform, &worldToBGRTransform, pSurfaceImageFrame, pNormalImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiFusionShadePointCloud" << std::endl;
			return -1;
		}

		cv::Mat surfaceMat( height, width, CV_8UC4, pSurfaceImageFrame->pFrameBuffer->pBits );
		cv::Mat normalMat( height, width, CV_8UC4, pNormalImageFrame->pFrameBuffer->pBits );

		cv::imshow( "Depth", depthMat );
		cv::imshow( "Surface", surfaceMat );
		cv::imshow( "Normal", normalMat );

		int key = cv::waitKey( 30 );
		if( key == VK_ESCAPE ){
			break;
		}
		else if( key == 'r' ){
			ResetReconstruction( pReconstruction, &worldToCameraTransform );
		}
	}

	SafeRelease( pDepthSource );
	SafeRelease( pDepthReader );
	SafeRelease( pDescription );
	SafeRelease( pCoordinateMapper );
	SafeRelease( pReconstruction );
	NuiFusionReleaseImageFrame( pDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pSmoothDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pPointCloudImageFrame );
	NuiFusionReleaseImageFrame( pSurfaceImageFrame );
	NuiFusionReleaseImageFrame( pNormalImageFrame );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
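Examples #5 and #8 call SafeRelease() and ResetReconstruction() without defining them. SafeRelease() is the conventional COM release helper from the Kinect SDK samples; the reset sketch below assumes the INuiFusionReconstruction::ResetReconstruction() method from the Kinect Fusion headers and the SetIdentityMatrix() helper already used in the listing:

// Conventional COM release helper used throughout the Kinect SDK samples.
template<class Interface>
inline void SafeRelease( Interface*& pInterfaceToRelease )
{
	if( pInterfaceToRelease != nullptr ){
		pInterfaceToRelease->Release();
		pInterfaceToRelease = nullptr;
	}
}

// Sketch: reset the reconstruction volume and restart from an identity pose.
void ResetReconstruction( INuiFusionReconstruction* pReconstruction, Matrix4* pWorldToCameraTransform )
{
	SetIdentityMatrix( *pWorldToCameraTransform );
	pReconstruction->ResetReconstruction( pWorldToCameraTransform, nullptr );
}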
/// <summary>
/// Retrieve depth data from stream frame
/// </summary>
void NuiDepthStream::ProcessDepth()
{
    HRESULT         hr;
    NUI_IMAGE_FRAME imageFrame;

	if (m_Recording)
	{
		// Initializing a video writer and allocating an image for recording
		if ((m_TimerCount++) % FramesPerFile == 0)
		{
			WCHAR szFilename[MAX_PATH] = { 0 };
			if (SUCCEEDED(GetFileName(szFilename, _countof(szFilename), m_instanceName, DepthSensor)))
			{
				char char_szFilename[MAX_PATH] = {0};
				size_t convertedChars;
				wcstombs_s(&convertedChars, char_szFilename, sizeof(char_szFilename), szFilename, _TRUNCATE);
				m_pwriter = cvCreateVideoWriter(char_szFilename,
					CV_FOURCC('L', 'A', 'G', 'S'),
					//-1,  //user specified
					FramesPerSecond, m_ImageRes);
					//2, m_ImageRes);
			}
			m_TimerCount %= FramesPerFile;
		}
	}

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_hStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    if (m_paused)
    {
        // Stream paused. Skip frame process and release the frame.
        goto ReleaseFrame;
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture;

	///FT image texture
	INuiFrameTexture* pFTTexture;

	pFTTexture=imageFrame.pFrameTexture;

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_hStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }

    NUI_LOCKED_RECT lockedRect;

	///FT locked rect
	NUI_LOCKED_RECT FTlockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &lockedRect, NULL, 0);

	// Lock the FT frame data
	pFTTexture->LockRect(0, &FTlockedRect, NULL, 0);

    // Make sure we've received valid data
    if (lockedRect.Pitch != 0)
    {
        // Convert depth data to a color image and copy to the image buffer
        m_imageBuffer.CopyDepth(lockedRect.pBits, lockedRect.size, nearMode, m_depthTreatment);
		// Convert the 12-bit depth values to an 8-bit grayscale frame
		NUI_DEPTH_IMAGE_PIXEL* depthBuffer = (NUI_DEPTH_IMAGE_PIXEL*)lockedRect.pBits;
		cv::Mat depthMat(m_ImageRes.height, m_ImageRes.width, CV_8UC1);
		INT cn = 1;
		for(int i=0;i<depthMat.rows;i++){
		   for(int j=0;j<depthMat.cols;j++){
			  USHORT realdepth = ((depthBuffer->depth)&0x0fff); //Taking 12LSBs for depth
			  BYTE intensity = realdepth == 0 || realdepth > 4095 ? 0 : 255 - (BYTE)(((float)realdepth / 4095.0f) * 255.0f);//Scaling to 255 scale grayscale
			  depthMat.data[i*depthMat.cols*cn + j*cn + 0] = intensity;
			  depthBuffer++;
		   }
		}


		// Copy FT depth data to IFTImage buffer
		memcpy(m_pFTdepthBuffer->GetBuffer(), PBYTE(FTlockedRect.pBits), std::min(m_pFTdepthBuffer->GetBufferSize(), UINT(pFTTexture->BufferLen())));
		
		if (m_Recording)
		{
			//*m_pcolorImage = depthMat;
			//cvWriteFrame(m_pwriter,m_pcolorImage);

			const NUI_SKELETON_FRAME* pSkeletonFrame = m_pPrimaryViewer->getSkeleton();
			m_pDepthInbedAPPs->processFrame(depthMat, lockedRect.pBits, m_ImageRes, pSkeletonFrame);

			//if (m_TimerCount%FramesPerFile==0)
			//{
			//	cvReleaseVideoWriter(&m_pwriter);
			//}
		}

        // Draw out the data with Direct2D
        if (m_pStreamViewer)
        {
            m_pStreamViewer->SetImage(&m_imageBuffer);
        }
    }

    // Done with the texture. Unlock and release it
	pFTTexture->UnlockRect(0);
    pTexture->UnlockRect(0);
    pTexture->Release();

ReleaseFrame:
    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_hStreamHandle, &imageFrame);
}
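Since NUI_DEPTH_IMAGE_PIXEL is declared as a player-index USHORT followed by a depth USHORT, the per-pixel loop above can also be written as a zero-copy wrap plus cv::extractChannel(); a sketch, assuming the locked buffer is tightly packed (no row padding):

// Wrap the NUI_DEPTH_IMAGE_PIXEL buffer as a two-channel 16-bit image:
// channel 0 = player index, channel 1 = depth in millimeters.
cv::Mat raw(m_ImageRes.height, m_ImageRes.width, CV_16UC2, lockedRect.pBits);
cv::Mat depth16;
cv::extractChannel(raw, depth16, 1);

// Invert and scale 0..4095 mm into an 8-bit grayscale image. Unlike the
// loop above, zero (invalid) depth comes out white here rather than black.
cv::Mat depthMat;
depth16.convertTo(depthMat, CV_8U, -255.0 / 4095.0, 255.0);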
Example #7
int _tmain(int argc, _TCHAR* argv[])
{
	cv::setUseOptimized( true );

	INuiSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = NuiCreateSensorByIndex( 0, &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiCreateSensorByIndex" << std::endl;
		return -1;
	}

	hResult = pSensor->NuiInitialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX | NUI_INITIALIZE_FLAG_USES_SKELETON );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiInitialize" << std::endl;
		return -1;
	}

	HANDLE hColorEvent = INVALID_HANDLE_VALUE;
	HANDLE hColorHandle = INVALID_HANDLE_VALUE;
	hColorEvent = CreateEvent( nullptr, true, false, nullptr );
	hResult = pSensor->NuiImageStreamOpen( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, 0, 2, hColorEvent, &hColorHandle );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiImageStreamOpen( COLOR )" << std::endl;
		return -1;
	}

	HANDLE hDepthPlayerEvent = INVALID_HANDLE_VALUE;
	HANDLE hDepthPlayerHandle = INVALID_HANDLE_VALUE;
	hDepthPlayerEvent = CreateEvent( nullptr, true, false, nullptr );
	hResult = pSensor->NuiImageStreamOpen( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_640x480, 0, 2, hDepthPlayerEvent, &hDepthPlayerHandle );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiImageStreamOpen( DEPTH&PLAYER )" << std::endl;
		return -1;
	}

	HANDLE hSkeletonEvent = INVALID_HANDLE_VALUE;
	hSkeletonEvent = CreateEvent( nullptr, true, false, nullptr );
	hResult = pSensor->NuiSkeletonTrackingEnable( hSkeletonEvent, 0 );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiSkeletonTrackingEnable" << std::endl;
		return -1;
	}

	unsigned long refWidth = 0;
	unsigned long refHeight = 0;
	NuiImageResolutionToSize( NUI_IMAGE_RESOLUTION_640x480, refWidth, refHeight );
	int width = static_cast<int>( refWidth );
	int height = static_cast<int>( refHeight );

	INuiCoordinateMapper* pCordinateMapper;
	hResult = pSensor->NuiGetCoordinateMapper( &pCordinateMapper );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiGetCoordinateMapper" << std::endl;
		return -1;
	}
	std::vector<NUI_COLOR_IMAGE_POINT> pColorPoint( width * height );

	HANDLE hEvents[3] = { hColorEvent, hDepthPlayerEvent, hSkeletonEvent };

	cv::Vec3b color[7];
	color[0] = cv::Vec3b(   0,   0,   0 );
	color[1] = cv::Vec3b( 255,   0,   0 );
	color[2] = cv::Vec3b(   0, 255,   0 );
	color[3] = cv::Vec3b(   0,   0, 255 );
	color[4] = cv::Vec3b( 255, 255,   0 );
	color[5] = cv::Vec3b( 255,   0, 255 );
	color[6] = cv::Vec3b(   0, 255, 255 );

	cv::namedWindow( "Color" );
	cv::namedWindow( "Depth" );
	cv::namedWindow( "Player" );
	cv::namedWindow( "Skeleton" );

	while( 1 ){
		ResetEvent( hColorEvent );
		ResetEvent( hDepthPlayerEvent );
		ResetEvent( hSkeletonEvent );
		WaitForMultipleObjects( ARRAYSIZE( hEvents ), hEvents, true, INFINITE );

		NUI_IMAGE_FRAME colorImageFrame = { 0 };
		hResult = pSensor->NuiImageStreamGetNextFrame( hColorHandle, 0, &colorImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiImageStreamGetNextFrame( COLOR )" << std::endl;
			return -1;
		}

		INuiFrameTexture* pColorFrameTexture = colorImageFrame.pFrameTexture;
		NUI_LOCKED_RECT colorLockedRect;
		pColorFrameTexture->LockRect( 0, &colorLockedRect, nullptr, 0 );

		NUI_IMAGE_FRAME depthPlayerImageFrame = { 0 };
		hResult = pSensor->NuiImageStreamGetNextFrame( hDepthPlayerHandle, 0, &depthPlayerImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiImageStreamGetNextFrame( DEPTH&PLAYER )" << std::endl;
			return -1;
		}

		BOOL nearMode = false;
		INuiFrameTexture* pDepthPlayerFrameTexture = nullptr;
		pSensor->NuiImageFrameGetDepthImagePixelFrameTexture( hDepthPlayerHandle, &depthPlayerImageFrame, &nearMode, &pDepthPlayerFrameTexture );
		NUI_LOCKED_RECT depthPlayerLockedRect;
		pDepthPlayerFrameTexture->LockRect( 0, &depthPlayerLockedRect, nullptr, 0 );

		NUI_SKELETON_FRAME skeletonFrame = { 0 };
		hResult = pSensor->NuiSkeletonGetNextFrame( 0, &skeletonFrame );
		if( FAILED( hResult ) ){
			std::cout << "Error : NuiSkeletonGetNextFrame" << std::endl;
			return -1;
		}

		/*
		NUI_TRANSFORM_SMOOTH_PARAMETERS smoothParameter;
		smoothParameter.fSmoothing = 0.5; 
		smoothParameter.fCorrection = 0.5; 
		smoothParameter.fPrediction = 0.0f; 
		smoothParameter.fJitterRadius = 0.05f; 
		smoothParameter.fMaxDeviationRadius = 0.04f;

		hResult = NuiTransformSmooth( &skeletonFrame, &smoothParameter );
		*/

		cv::Mat colorMat( height, width, CV_8UC4, reinterpret_cast<unsigned char*>( colorLockedRect.pBits ) );

		cv::Mat bufferMat = cv::Mat::zeros( height, width, CV_16UC1 );
		cv::Mat playerMat = cv::Mat::zeros( height, width, CV_8UC3 );
		NUI_DEPTH_IMAGE_PIXEL* pDepthPlayerPixel = reinterpret_cast<NUI_DEPTH_IMAGE_PIXEL*>( depthPlayerLockedRect.pBits );
		pCordinateMapper->MapDepthFrameToColorFrame( NUI_IMAGE_RESOLUTION_640x480, width * height, pDepthPlayerPixel, NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, width * height, &pColorPoint[0] );
		for( int y = 0; y < height; y++ ){
			for( int x = 0; x < width; x++ ){
				unsigned int index = y * width + x;
				bufferMat.at<unsigned short>( pColorPoint[index].y, pColorPoint[index].x ) = pDepthPlayerPixel[index].depth;
				playerMat.at<cv::Vec3b>( pColorPoint[index].y, pColorPoint[index].x ) = color[pDepthPlayerPixel[index].playerIndex];
			}
		}
		cv::Mat depthMat( height, width, CV_8UC1 );
		bufferMat.convertTo( depthMat, CV_8U, -255.0f / 10000.0f, 255.0f );

		cv::Mat skeletonMat = cv::Mat::zeros( height, width, CV_8UC3 );
		NUI_COLOR_IMAGE_POINT colorPoint;
		for( int count = 0; count < NUI_SKELETON_COUNT; count++ ){
			NUI_SKELETON_DATA skeletonData = skeletonFrame.SkeletonData[count];
			if( skeletonData.eTrackingState == NUI_SKELETON_TRACKED ){
				for( int position = 0; position < NUI_SKELETON_POSITION_COUNT; position++ ){
					pCordinateMapper->MapSkeletonPointToColorPoint( &skeletonData.SkeletonPositions[position], NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, &colorPoint );
					if( ( colorPoint.x >= 0 ) && ( colorPoint.x < width ) && ( colorPoint.y >= 0 ) && ( colorPoint.y < height ) ){
						cv::circle( skeletonMat, cv::Point( colorPoint.x, colorPoint.y ), 10, static_cast<cv::Scalar>( color[count + 1] ), -1, CV_AA );
					}
				}

				std::stringstream ss;
				ss << skeletonData.SkeletonPositions[NUI_SKELETON_POSITION_HIP_CENTER].z;
				pCordinateMapper->MapSkeletonPointToColorPoint( &skeletonData.SkeletonPositions[NUI_SKELETON_POSITION_HEAD], NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, &colorPoint );
				if( ( colorPoint.x >= 0 ) && ( colorPoint.x < width ) && ( colorPoint.y >= 0 ) && ( colorPoint.y < height ) ){
					cv::putText( skeletonMat, ss.str(), cv::Point( colorPoint.x - 50, colorPoint.y - 20 ), cv::FONT_HERSHEY_SIMPLEX, 1.5f, static_cast<cv::Scalar>( color[count + 1] ) );
				}
			}
			else if( skeletonData.eTrackingState == NUI_SKELETON_POSITION_ONLY ){
				pCordinateMapper->MapSkeletonPointToColorPoint( &skeletonData.SkeletonPositions[NUI_SKELETON_POSITION_HIP_CENTER], NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, &colorPoint );
				if( ( colorPoint.x >= 0 ) && ( colorPoint.x < width ) && ( colorPoint.y >= 0 ) && ( colorPoint.y < height ) ){
						cv::circle( skeletonMat, cv::Point( colorPoint.x, colorPoint.y ), 10, static_cast<cv::Scalar>( color[count + 1] ), -1, CV_AA );
				}
			}
		}

		cv::imshow( "Color", colorMat );
		cv::imshow( "Depth", depthMat );
		cv::imshow( "Player", playerMat );
		cv::imshow( "Skeleton", skeletonMat );

		pColorFrameTexture->UnlockRect( 0 );
		pDepthPlayerFrameTexture->UnlockRect( 0 );
		pSensor->NuiImageStreamReleaseFrame( hColorHandle, &colorImageFrame );
		pSensor->NuiImageStreamReleaseFrame( hDepthPlayerHandle, &depthPlayerImageFrame );

		if( cv::waitKey( 30 ) == VK_ESCAPE ){
			break;
		}
	}

	pSensor->NuiShutdown();
	pSensor->NuiSkeletonTrackingDisable();
	pCordinateMapper->Release();
	CloseHandle( hColorEvent );
	CloseHandle( hDepthPlayerEvent );
	CloseHandle( hSkeletonEvent );

	cv::destroyAllWindows();

	return 0;
}
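One caveat on Example #7: MapDepthFrameToColorFrame() can return color points just outside the frame for unmappable depth pixels, and the write loop above indexes bufferMat and playerMat with them unchecked. A bounds-checked variant of that loop:

// Guard against out-of-range color points before writing into the images.
for( int y = 0; y < height; y++ ){
	for( int x = 0; x < width; x++ ){
		unsigned int index = y * width + x;
		int cx = pColorPoint[index].x;
		int cy = pColorPoint[index].y;
		if( ( cx >= 0 ) && ( cx < width ) && ( cy >= 0 ) && ( cy < height ) ){
			bufferMat.at<unsigned short>( cy, cx ) = pDepthPlayerPixel[index].depth;
			playerMat.at<cv::Vec3b>( cy, cx ) = color[pDepthPlayerPixel[index].playerIndex];
		}
	}
}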
Example #8
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource( &pDepthSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader( &pDepthReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pDepthSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424
	unsigned int bufferSize = width * height * sizeof( unsigned short );

	cv::Mat bufferMat( height, width, CV_16UC1 );	// the depth frame holds unsigned 16-bit values
	cv::Mat depthMat( height, width, CV_8UC1 );
	cv::namedWindow( "Depth" );

	while( 1 ){
		// Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
		if( SUCCEEDED( hResult ) ){
			hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
			if( SUCCEEDED( hResult ) ){
				bufferMat.convertTo( depthMat, CV_8U, -255.0f / 4500.0f, 255.0f );
			}
		}
	
		SafeRelease( pDepthFrame );

		cv::imshow( "Depth", depthMat );

		if( cv::waitKey( 30 ) == VK_ESCAPE ){
			break;
		}
	}

	SafeRelease( pDepthSource );
	SafeRelease( pDepthReader );
	SafeRelease( pDescription );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
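AccessUnderlyingBuffer() in Example #8 redirects bufferMat.data into the frame's internal buffer, which is zero-copy but only valid until SafeRelease( pDepthFrame ). When the pixels need to outlive the frame, copying is safer; a sketch using IDepthFrame::CopyFrameDataToArray():

// Copy the depth frame into memory that bufferMat owns instead of pointing
// bufferMat.data at the frame's internal buffer.
hResult = pDepthFrame->CopyFrameDataToArray( width * height, reinterpret_cast<UINT16*>( bufferMat.data ) );
if( SUCCEEDED( hResult ) ){
	bufferMat.convertTo( depthMat, CV_8U, -255.0f / 4500.0f, 255.0f );
}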
Example #9
int _tmain(int argc, _TCHAR* argv[])
{
	cv::setUseOptimized( true );

	// Create and initialize the Kinect sensor instance
	INuiSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = NuiCreateSensorByIndex( 0, &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiCreateSensorByIndex" << std::endl;
		return -1;
	}

	hResult = pSensor->NuiInitialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX | NUI_INITIALIZE_FLAG_USES_SKELETON );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiInitialize" << std::endl;
		return -1;
	}

	// Color stream
	HANDLE hColorEvent = INVALID_HANDLE_VALUE;
	HANDLE hColorHandle = INVALID_HANDLE_VALUE;
	hColorEvent = CreateEvent( nullptr, true, false, nullptr );
	hResult = pSensor->NuiImageStreamOpen( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, 0, 2, hColorEvent, &hColorHandle );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiImageStreamOpen( COLOR )" << std::endl;
		return -1;
	}

	// Depth & Player stream
	HANDLE hDepthPlayerEvent = INVALID_HANDLE_VALUE;
	HANDLE hDepthPlayerHandle = INVALID_HANDLE_VALUE;
	hDepthPlayerEvent = CreateEvent( nullptr, true, false, nullptr );
	hResult = pSensor->NuiImageStreamOpen( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_640x480, 0, 2, hDepthPlayerEvent, &hDepthPlayerHandle );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiImageStreamOpen( DEPTH&PLAYER )" << std::endl;
		return -1;
	}

	// Skeleton stream
	HANDLE hSkeletonEvent = INVALID_HANDLE_VALUE;
	hSkeletonEvent = CreateEvent( nullptr, true, false, nullptr );
	hResult = pSensor->NuiSkeletonTrackingEnable( hSkeletonEvent, 0 );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiSkeletonTrackingEnable" << std::endl;
		return -1;
	}

	HANDLE hEvents[3] = { hColorEvent, hDepthPlayerEvent, hSkeletonEvent };

	// Color table
	cv::Vec3b color[7];
	color[0] = cv::Vec3b(   0,   0,   0 );
	color[1] = cv::Vec3b( 255,   0,   0 );
	color[2] = cv::Vec3b(   0, 255,   0 );
	color[3] = cv::Vec3b(   0,   0, 255 );
	color[4] = cv::Vec3b( 255, 255,   0 );
	color[5] = cv::Vec3b( 255,   0, 255 );
	color[6] = cv::Vec3b(   0, 255, 255 );

	cv::namedWindow( "Color" );
	cv::namedWindow( "Depth" );
	cv::namedWindow( "Player" );
	cv::namedWindow( "Skeleton" );

	while( 1 ){
		// Wait for frame updates
		ResetEvent( hColorEvent );
		ResetEvent( hDepthPlayerEvent );
		ResetEvent( hSkeletonEvent );
		WaitForMultipleObjects( ARRAYSIZE( hEvents ), hEvents, true, INFINITE );

		// Get a frame from the color camera
		NUI_IMAGE_FRAME pColorImageFrame = { 0 };
		hResult = pSensor->NuiImageStreamGetNextFrame( hColorHandle, 0, &pColorImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiImageStreamGetNextFrame( COLOR )" << std::endl;
			return -1;
		}

		// Get a frame from the depth sensor
		NUI_IMAGE_FRAME pDepthPlayerImageFrame = { 0 };
		hResult = pSensor->NuiImageStreamGetNextFrame( hDepthPlayerHandle, 0, &pDepthPlayerImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiImageStreamGetNextFrame( DEPTH&PLAYER )" << std::endl;
			return -1;
		}

		// Get the skeleton frame
		NUI_SKELETON_FRAME pSkeletonFrame = { 0 };
		hResult = pSensor->NuiSkeletonGetNextFrame( 0, &pSkeletonFrame );
		if( FAILED( hResult ) ){
			std::cout << "Error : NuiSkeletonGetNextFrame" << std::endl;
			return -1;
		}

		// Retrieve the color image data
		INuiFrameTexture* pColorFrameTexture = pColorImageFrame.pFrameTexture;
		NUI_LOCKED_RECT sColorLockedRect;
		pColorFrameTexture->LockRect( 0, &sColorLockedRect, nullptr, 0 );

		// Retrieve the depth data
		INuiFrameTexture* pDepthPlayerFrameTexture = pDepthPlayerImageFrame.pFrameTexture;
		NUI_LOCKED_RECT sDepthPlayerLockedRect;
		pDepthPlayerFrameTexture->LockRect( 0, &sDepthPlayerLockedRect, nullptr, 0 );

		// Display
		cv::Mat colorMat( 480, 640, CV_8UC4, reinterpret_cast<uchar*>( sColorLockedRect.pBits ) );

		LONG registX = 0;
		LONG registY = 0;
		ushort* pBuffer = reinterpret_cast<ushort*>( sDepthPlayerLockedRect.pBits );
		cv::Mat bufferMat = cv::Mat::zeros( 480, 640, CV_16UC1 );
		cv::Mat playerMat = cv::Mat::zeros( 480, 640, CV_8UC3 );
		for( int y = 0; y < 480; y++ ){
			for( int x = 0; x < 640; x++ ){
				pSensor->NuiImageGetColorPixelCoordinatesFromDepthPixelAtResolution( NUI_IMAGE_RESOLUTION_640x480, NUI_IMAGE_RESOLUTION_640x480, nullptr, x, y, *pBuffer, &registX, &registY );
				if( ( registX >= 0 ) && ( registX < 640 ) && ( registY >= 0 ) && ( registY < 480 ) ){
					bufferMat.at<ushort>( registY, registX ) = *pBuffer & 0xFFF8;
					playerMat.at<cv::Vec3b>( registY, registX ) = color[*pBuffer & 0x7];
				}
				pBuffer++;
			}
		}
		cv::Mat depthMat( 480, 640, CV_8UC1 );
		bufferMat.convertTo( depthMat, CV_8U, -255.0f / NUI_IMAGE_DEPTH_MAXIMUM, 255.0f );

		cv::Mat skeletonMat = cv::Mat::zeros( 480, 640, CV_8UC3 );
		cv::Point2f point;
		for( int count = 0; count < NUI_SKELETON_COUNT; count++ ){
			NUI_SKELETON_DATA skeleton = pSkeletonFrame.SkeletonData[count];
			if( skeleton.eTrackingState == NUI_SKELETON_TRACKED ){
				for( int position = 0; position < NUI_SKELETON_POSITION_COUNT; position++ ){
					NuiTransformSkeletonToDepthImage( skeleton.SkeletonPositions[position], &point.x, &point.y, NUI_IMAGE_RESOLUTION_640x480 );
					cv::circle( skeletonMat, point, 10, static_cast<cv::Scalar>( color[count + 1] ), -1, CV_AA );
				}
			}
		}
		
		cv::imshow( "Color", colorMat );
		cv::imshow( "Depth", depthMat );
		cv::imshow( "Player", playerMat );
		cv::imshow( "Skeleton", skeletonMat );

		// Release the frames
		pColorFrameTexture->UnlockRect( 0 );
		pDepthPlayerFrameTexture->UnlockRect( 0 );
		pSensor->NuiImageStreamReleaseFrame( hColorHandle, &pColorImageFrame );
		pSensor->NuiImageStreamReleaseFrame( hDepthPlayerHandle, &pDepthPlayerImageFrame );

		// Exit the loop on the Esc key
		if( cv::waitKey( 30 ) == VK_ESCAPE ){
			break;
		}
	}

	// Shut down the Kinect
	pSensor->NuiShutdown();
	pSensor->NuiSkeletonTrackingDisable();
	CloseHandle( hColorEvent );
	CloseHandle( hDepthPlayerEvent );
	CloseHandle( hSkeletonEvent );
	CloseHandle( hColorHandle );
	CloseHandle( hDepthPlayerHandle );

	cv::destroyAllWindows();

	return 0;
}