Code example #1
0
bool KinectCapture::GetAlignedColorImage(cv::Mat & image)
{
	// Produces a color image registered to the depth camera's viewpoint:
	// one opaque BGRA pixel per depth pixel, taken from the color frame
	// where the Kinect runtime maps that depth pixel, or black where the
	// mapping falls outside the color frame.
	// Returns false if the aligned buffer is missing or no color frame
	// could be acquired; on success `image` aliases the aligned buffer.
	if (m_alignedColorImagePixelBuffer == nullptr)
	{
		return false;
	}

	// Acquire the unaligned color image first; this also refreshes
	// m_colorImagePixelBuffer, which is sampled below.
	if (!GetColorImage(image))
	{
		return false;
	}

	// Align: map every depth pixel (i, j) to its color-frame coordinates
	// (x, y) and copy that color pixel into the aligned buffer.
	for (int j = 0; j < m_DepthHeight; j++)
	{
		for (int i = 0; i < m_DepthWidth; i++)
		{
			long x;
			long y;
			NuiImageGetColorPixelCoordinatesFromDepthPixelAtResolution(
				m_colorImageResolution,  // color frame resolution
				m_depthImageResolution,  // depth frame resolution
				NULL,                    // pan/zoom of color image (unused)
				i, j, m_depthImagePixelBuffer[j*m_DepthWidth + i], // column, row, and depth in depth image
				&x, &y                   // output: column and row (x,y) in the color image
				);

			// Destination offset of BGRA pixel (i, j) in the aligned buffer.
			const int dst = 4 * (j*m_DepthWidth + i);

			// BUGFIX: the bounds test must use >= width/height, not >.
			// x == m_ColorWidth (or y == m_ColorHeight) previously passed
			// the check and read one pixel past the end of the color buffer.
			if (x < 0 || y < 0 || x >= m_ColorWidth || y >= m_ColorHeight)
			{
				// No valid mapping for this depth pixel: opaque black.
				for (int ch = 0; ch < 3; ch++)
				{
					m_alignedColorImagePixelBuffer[dst + ch] = 0;
				}
				m_alignedColorImagePixelBuffer[dst + 3] = 255;
			}
			else
			{
				// Copy the BGR channels from color pixel (x, y); force alpha opaque.
				for (int ch = 0; ch < 3; ch++)
				{
					m_alignedColorImagePixelBuffer[dst + ch] = m_colorImagePixelBuffer[4 * (y*m_ColorWidth + x) + ch];
				}
				m_alignedColorImagePixelBuffer[dst + 3] = 255;
			}
		}
	}

	// NOTE: this cv::Mat constructor does not copy data — the returned
	// image aliases m_alignedColorImagePixelBuffer.
	cv::Mat colorImg(m_DepthHeight, m_DepthWidth, CV_8UC4, m_alignedColorImagePixelBuffer);
	image = colorImg;
	return true;
}
Code example #2
0
Mat ImageProcessor::make8BitColorDistanceImage(Mat edges16Bit, int min_mm, int max_mm)
{
	// Maps a 16-bit distance/edge image (values presumably in millimetres,
	// judging by the parameter names — confirm with callers) to an 8-bit BGR
	// visualization: values at/below min_mm are pure red, values at/above
	// max_mm are pure blue, and values in between blend linearly from red
	// to blue. Zero-valued (black) input pixels stay black.
	//
	// Parameters:
	//   edges16Bit - single-channel 16-bit (unsigned short) input image
	//   min_mm     - distance mapped to pure red
	//   max_mm     - distance mapped to pure blue
	// Returns a CV_8UC3 image of the same size.
	float range = max_mm - min_mm;

	//create 3channel 8 bit image matrix
	Mat colorImg(edges16Bit.rows, edges16Bit.cols, CV_8UC3);

	//iterate over edge image, compute color values from edge pixel values and write to color image
	MatIterator_<unsigned short> it, end; //16 bit iterator
	int pixel = 0;

	for(it = edges16Bit.begin<unsigned short>(), end = edges16Bit.end<unsigned short>(); it != end; ++it, ++pixel)
	{
		unsigned short value = *it;
		if(value != 0) //not black = zero
		{
			int blue, red;

			//avoid senseless distance interval settings
			if(value < min_mm) { blue = 0; red = 255; }
			else if(value > max_mm) { blue = 255; red = 0; }
			else if(range <= 0.0f)
			{
				// BUGFIX: with min_mm == max_mm and value equal to both, the
				// original divided by zero (NaN -> undefined int conversion).
				// Treat the degenerate interval as "near" (red).
				blue = 0; red = 255;
			}
			else
			{
				// Linear blend across [min_mm, max_mm].
				blue = (value - min_mm) / range * 255;
				red = 255-blue;
			}

			colorImg.at<cv::Vec3b>(pixel)[0] = blue;
			colorImg.at<cv::Vec3b>(pixel)[1] = 0;
			colorImg.at<cv::Vec3b>(pixel)[2] = red;
		}
		else
		{
			// Black input pixel stays black in the output.
			colorImg.at<cv::Vec3b>(pixel)[0] = 0;
			colorImg.at<cv::Vec3b>(pixel)[1] = 0;
			colorImg.at<cv::Vec3b>(pixel)[2] = 0;
		}
	}

	return colorImg;
}
Code example #3
0
//---------------------------------------------------------------------------------------
// Demo entry point: grabs depth/color/skeleton frames from a Kinect in a
// loop, shows them with OpenCV, remaps color pixels via the runtime's
// depth-to-color coordinate table, lets the user crop a region of interest
// with the mouse, and optionally saves debug images and skeletal joints
// until 'q' is pressed.
int main(int argc, char** argv)
{
  // Setup the kinect
  ethos::cpr::Kinect kinect;
  if (!kinect.Initialize())
  {
    return -1;
  }

  // Global flag set by the on_mouse callback once the user has selected
  // an ROI (see the bClicked block further down).
  bClicked = false;

  // The image holders
  cv::Mat depthImg, colorImg, skeletalImg, maskOverColorImg, maskImg, maskedDepthImg, labelImg;
  bool foundSkeleton;
  cv::Mat CroppedDepth, CroppedColor;
  cv::Mat CaliDepth;
  
  cv::Size dSize = cv::Size(320, 240);

  // 3-channel versions of image holders for depth and color
  // (skeleton already 3-channels)
  cv::Mat depthImg3, colorImg3;

  // Save images?
  const bool saveDebugImages = true;
  // NOTE(review): 1 MB stack buffer for a short filename; 1024 bytes (as
  // used for jointFilename below) would presumably suffice — confirm.
  char imgFilename[1024*1024];
  int imgFrameCounter = 0;
  if (saveDebugImages)
  {
#ifdef USE_DEPTH_IMAGES
    _mkdir("depth");
#endif
#ifdef USE_COLOR_IMAGES
    _mkdir("color");
#endif
#ifdef USE_SKELETAL_IMAGES
    _mkdir("skeleton");
#endif
  }

  // Save joint values?
  const bool saveJoints = true;
  char jointFilename[1024];
  int jointFrameCounter = 0;
  if (saveJoints)
  {
#ifdef USE_SKELETAL_IMAGES
    _mkdir("joints");
#endif
  }

  // Grab images until user says to stop
  while (true)
  {
#ifdef USE_DEPTH_IMAGES
    // Grab next frames from the kinect
    if (!kinect.GetDepthImage(depthImg))
    {
      break;
    }
    cv::imshow("Depth", depthImg);
#endif

#ifdef USE_COLOR_IMAGES
    if (!kinect.GetColorImage(colorImg))
    {
      break;
    }
    cv::imshow("Color", colorImg);
#endif

#ifdef USE_SKELETAL_IMAGES
    if (!kinect.GetSkeletalImage(skeletalImg, &foundSkeleton))
    {
      break;
    }
    //const ethos::cpr::SkeletonJoints& joints = kinect.GetSkeletalJoints();
    cv::imshow("Skeleton", skeletalImg);
#endif

#ifdef USE_COLOR_AND_DEPTH_IMAGES
	// Grab a matching depth + color pair for the registration code below.
	if (!kinect.GetDepthImage(depthImg))
	{
		break;
	}
	cv::imshow("Depth", depthImg);
	if (!kinect.GetColorImage(colorImg))
	{
		break;
	}
	cv::imshow("Color", colorImg);
#endif

	// NOTE(review): re-registered on every loop iteration; the comment near
	// "set Callback function" below says this should only run once per run.
	cvSetMouseCallback( "Depth", on_mouse, 0 );

	/*
	***************************************************************************************************
	//Calibration
	{
		cv::Point3f depthPt;
		cv::Point3f colorPt;
		CaliDepth = depthImg;
		CaliDepth.setTo(0);

		for (int j = 0; j < depthImg.rows; j ++)
		{
			uchar* data_depth= depthImg.ptr<uchar>(j); 
			for (int i = 0; i < depthImg.cols; i ++)
			{				
				depthPt.z = data_depth[i] * 5.0f;	           //z, according to the previous convert factor: depthF /= 1300.0f; depthF *= 255.0f. Revert back;
				depthPt.x = depthPt.z * (i - 324.3) / 526.7;   //x
				depthPt.y = depthPt.z * (i - 247.8) / 525.8;   //y

				//convert to color camera 2D
				colorPt.x =  509.98f * depthPt.x - 18.124f * depthPt.y + 349.569f * depthPt.z - 11661.2f; //x
				colorPt.y = -11.765f * depthPt.x + 512.72f * depthPt.y + 273.624f * depthPt.z + 153.29f;  //y
				colorPt.z = -0.0496f * depthPt.x - 0.0502f * depthPt.y + 0.9975f  * depthPt.z + 7.4660f;  //w

				colorPt.x = (int)(colorPt.x / colorPt.z);  //normalize by w
				colorPt.y = (int)(colorPt.y / colorPt.z);  //normalize by w

				//check boundary
				if (colorPt.x > depthImg.cols || colorPt.x < 0 ||
					colorPt.y > depthImg.cols || colorPt.y < 0)
					continue;

				int ii = colorPt.y;  //ii is the transformed x in color image
				int jj = colorPt.x;  //jj is the transformed y in color image

				CaliDepth.ptr<uchar>(jj)[ii] = data_depth[i]; //assign the same depth value from the original depth image with a new position (ii,jj)
			}
		}

		cv::imshow("Cali_Depth", CaliDepth );

		
	}
***************************************************************************************************
*/


	// Hard-coded Kinect v1 resolutions: both streams assumed 640x480.
	LONG m_depthWidth = 640;
	LONG m_depthHeight = 480;
	LONG m_colorWidth = 640;
	LONG m_colorHeight = 480;

	// Depth-to-color coordinate table produced by the Kinect runtime:
	// presumably two LONGs (x, y) per depth pixel — confirm against
	// Kinect::GetColorCoordinates.
	LONG* m_colorCoordinates = kinect.GetColorCoordinates();

	cv::Mat display(480,640,CV_8UC3,cv::Scalar(0));
	

	// loop over each row and column of the color
	for (LONG y = 0; y < m_colorHeight; ++y)
	{
		// NOTE(review): the row offset here is colorImg.cols BYTES, but
		// colorImg is 4 bytes per pixel (CV_8UC4 from GetColorImage), so the
		// stride should presumably be colorImg.step (cols * 4) — as written,
		// consecutive y values address overlapping quarter-rows. Confirm.
		LONG* pDest = (LONG*)((BYTE*)colorImg.data + colorImg.cols * y);
		unsigned char* pColor = display.ptr<unsigned char>(y);
		for (LONG x = 0; x < m_colorWidth; ++x, pColor += 3)
		{
			// calculate index into depth array
			int depthIndex = x + y * m_depthWidth;

			// retrieve the depth to color mapping for the current depth pixel
			LONG colorInDepthX = m_colorCoordinates[depthIndex * 2];
			LONG colorInDepthY = m_colorCoordinates[depthIndex * 2 + 1];

			// make sure the depth pixel maps to a valid point in color space
			if ( colorInDepthX >= 0 && colorInDepthX < m_colorWidth && colorInDepthY >= 0 && colorInDepthY < m_colorHeight )
			{
				// calculate index into color array
				LONG colorIndex = colorInDepthX + colorInDepthY * m_colorWidth;

				// set source for copy to the color pixel
				// NOTE(review): unlike pDest above, this cast binds to
				// colorImg.data alone, so + colorIndex advances in 4-byte
				// LONG units (whole pixels) — the two addressing schemes
				// disagree; confirm which is intended.
				LONG* pSrc = (LONG *)(BYTE*)colorImg.data + colorIndex;
				*pDest = *pSrc;
			}
			else
			{
				// No valid mapping: write a black (zero) pixel.
				*pDest = 0;
			}

			// Fill-in color image
			{
				// Unpack the 32-bit pixel into the 3-channel display image.
				LONG val = *pDest;
				unsigned char* pVal = (unsigned char*)&val;
				pColor[0] = *pVal++;
				pColor[1] = *pVal++;
				pColor[2] = *pVal++;
			}

			pDest++;
		}
	}

	cv::imshow("Display", display);
	// NOTE(review): waitKey() with no timeout blocks until a key is pressed,
	// so the capture loop stalls on every frame — confirm this is intended.
	cv::waitKey();




	//set Callback function, only call once per run

	// Once the mouse callback has recorded two ROI corners, crop both
	// images to that rectangle and smooth the cropped depth.
	if (bClicked == true)
	{
		//We start to process the image

		// NOTE(review): assumes ROI_Vertices[0] is the upper-left and
		// ROI_Vertices[1] the lower-right corner; no validation that the
		// rectangle is non-empty or within image bounds.
		cv::Point leftUpCorner = ROI_Vertices[0];
		cv::Point rightDownCorner = ROI_Vertices[1];

		cv::Rect myROI(leftUpCorner.x, leftUpCorner.y, rightDownCorner.x - leftUpCorner.x, rightDownCorner.y - leftUpCorner.y);

		CroppedDepth = depthImg(myROI);
		CroppedColor = colorImg(myROI);

		//cv::bilateralFilter(CroppedDepth, CroppedDepth, CV_BILATERAL, 3, 0);
		// Two passes of a small Gaussian blur to smooth depth noise in place.
		for (int i = 0; i < 2; i ++)
		{
			cv::GaussianBlur(CroppedDepth, CroppedDepth, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT);
		}

		cv::imshow("new_Depth", CroppedDepth );
		cv::imshow("new_Color", CroppedColor);


		
		/*

		std::vector<cv::Point> ROI_Poly;
		cv::approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, true);
		cv::fillConvexPoly(maskImg, &ROI_Poly[0], ROI_Poly.size(), cv::Scalar(255,255,255));                 
		colorImg.copyTo(maskOverColorImg, maskImg);


		inRange(maskOverColorImg, cv::Scalar(15, 15, 15), cv::Scalar(124, 154, 95), maskedDepthImg);
		//inRange(maskOverColorImg, cv::Scalar(50, 65, 40), cv::Scalar(124, 154, 95), maskedDepthImg);
		cv::imshow("Mask", maskOverColorImg);

		int erosion_size = 1;   
		cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
			cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1), 
			cv::Point(erosion_size, erosion_size) );
		
		for (int j=0; j<maskedDepthImg.rows; j++)  
		{  
			uchar* data_depth= maskedDepthImg.ptr<uchar>(j);  
			uchar* data_mask = maskImg.ptr<uchar>(j);
			for (int i=0; i<maskedDepthImg.cols; i++)  
			{                   
				if (data_depth[i] == data_mask[i])
				{
					data_depth[i] = 0;
				}
				else
				{
					data_depth[i] = 255;
				}
				 
			}  
		}  
		for (int i = 0 ; i < 4; i ++)
			cv::erode(maskedDepthImg, maskedDepthImg, element);	
		for (int i = 0; i < 5; i ++)
			cv::dilate(maskedDepthImg, maskedDepthImg, element);


		std::vector<std::vector<cv::Point> > contours;
		std::vector<cv::Vec4i> hierarchy;
		findContours( maskedDepthImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

		if( !contours.empty() && !hierarchy.empty() )
		{
			for (int idx=0;idx < contours.size();idx++)
			{
				drawContours(maskedDepthImg,contours,idx,cv::Scalar::all(255),CV_FILLED,8);
			}
		}

		cv::imshow("Depth_Mask", maskedDepthImg);
		maskImg.setTo(0);
		depthImg.copyTo(maskImg, maskedDepthImg);
		cv::imshow("Final", maskImg);

		resize(maskImg, maskImg, dSize);
		*/
	}

	
    // (Optionally) save output debugging images
    if (saveDebugImages)
    {
#ifdef USE_DEPTH_IMAGES
      sprintf(imgFilename, "depth/depthFrame_%04d.png", imgFrameCounter);
      prepareToSave(depthImg, depthImg3);
      cv::imwrite(imgFilename, depthImg3);
#endif

#ifdef USE_COLOR_IMAGES
      sprintf(imgFilename, "color/colorFrame_%04d.png", imgFrameCounter);
      prepareToSave(colorImg, colorImg3);
      cv::imwrite(imgFilename, colorImg3);
#endif

#ifdef USE_SKELETAL_IMAGES
      sprintf(imgFilename, "skeleton/skeletonFrame_%04d.png", imgFrameCounter);
      cv::imwrite(imgFilename, skeletalImg);
#endif

#ifdef USE_COLOR_AND_DEPTH_IMAGES
	  // Save the cropped pair only when the user presses 's'.
	  int key = cv::waitKey(1);
	  if (key == 's')
	  //if (bClicked == true)
	  {
// 		  sprintf(imgFilename, "depth/depthFrame_%04d.png", imgFrameCounter);
// 		  prepareToSave(depthImg, depthImg3);
// 		  cv::imwrite(imgFilename, depthImg3);

		  // NOTE(review): the "depth_mask" directory is never created by the
		  // _mkdir calls above — imwrite will fail silently if it is missing.
		  sprintf(imgFilename, "depth_mask/depthFrame_%04d.jpg", imgFrameCounter);
		  prepareToSave(CroppedDepth, depthImg3);
		  cv::imwrite(imgFilename, depthImg3);

		  sprintf(imgFilename, "color/colorFrame_%04d.png", imgFrameCounter);
		  prepareToSave(CroppedColor, colorImg3);
		  cv::imwrite(imgFilename, colorImg3);
	  }
#endif

      ++imgFrameCounter;
    }

    // (Optionally) save joint values
    // NOTE(review): the saveJoints/foundSkeleton guard (and the counter
    // increment) is commented out, so joints are written every frame to
    // the same jointFrameCounter-indexed file when skeletal images are on.
    //if (saveJoints && foundSkeleton)
    //{
#ifdef USE_SKELETAL_IMAGES
      sprintf(jointFilename, "joints/jointFrame_%04d.txt", jointFrameCounter);
      std::ofstream ofs(jointFilename);
      kinect.SaveSkeletalJoints(ofs);
      ofs.close();
#endif

    //  ++jointFrameCounter;
   // }

    // Check for user keyboard input to quit early
    int key = cv::waitKey(1);
    if (key == 'q')
    {
      break;
    }
  }

  // Exit application
  return 0;
}
Code example #4
0
bool KinectCapture::GetColorImage(cv::Mat& image)
{
	// Grabs the next color frame from the Kinect color stream and returns
	// it as a CV_8UC4 image. Returns false (leaving `image` a black frame)
	// on timeout or any stream/copy error.
	NUI_IMAGE_FRAME imageFrame;
	NUI_LOCKED_RECT lockedRect;
	HRESULT hr;

	// Pre-fill the output so callers always receive a valid image,
	// even on failure.
	image = cv::Mat::zeros(m_ColorHeight, m_ColorWidth,CV_8UC4);

	// Wait up to 100 ms for the next color frame.
	hr = m_NuiSensor->NuiImageStreamGetNextFrame(m_ColorStreamHandle, 100, &imageFrame);
	if (FAILED(hr))
	{
		if (hr == E_FAIL)
		{
			std::cout << "Kinect NuiImageStreamGetNextFrame call failed with unspecified"<< std::endl;
		}
		if (hr == E_INVALIDARG)
		{
			std::cout << "Kinect NuiImageStreamGetNextFrame call failed with dwFlag parameter is NULL" << std::endl;
		}
		if (hr == E_NUI_DEVICE_NOT_READY)
		{
			std::cout << "Kinect NuiImageStreamGetNextFrame call failed with kinect not initialized" << std::endl;
		}
		if (hr == E_OUTOFMEMORY)
		{
			std::cout << "Kinect NuiImageStreamGetNextFrame call failed with out of memory" << std::endl;
		}
		if (hr == E_POINTER)
		{
			std::cout << "Kinect NuiImageStreamGetNextFrame call failed with invalid handle" << std::endl;
		}
		// E_NUI_FRAME_NO_DATA (timeout) is a normal condition: no message.
		return false;
	}

	INuiFrameTexture* colorTexture = imageFrame.pFrameTexture;
	// Lock the frame data to access the color pixels.
	hr = colorTexture->LockRect(0, &lockedRect, NULL, 0);
	if (FAILED(hr) || lockedRect.Pitch == 0)
	{
		std::cout << "Error getting color texture pixels." << std::endl;
		// BUGFIX: the frame must be released on this error path too;
		// leaking it exhausts the runtime's frame pool and stalls the stream.
		m_NuiSensor->NuiImageStreamReleaseFrame(m_ColorStreamHandle, &imageFrame);
		return false;
	}

	// Copy the pixels into our own buffer so the frame can be released.
	errno_t err = memcpy_s(m_colorImagePixelBuffer, 4 * m_ColorImagePixels * sizeof(BYTE), lockedRect.pBits, colorTexture->BufferLen());
	colorTexture->UnlockRect(0);

	// BUGFIX: capture the release result. The original discarded it and
	// then tested a stale hr left over from LockRect, so release failures
	// were never detected. Releasing before the memcpy check also returns
	// the frame to the runtime on every remaining path.
	hr = m_NuiSensor->NuiImageStreamReleaseFrame(m_ColorStreamHandle, &imageFrame);
	if (0 != err)
	{
		std::cout << "Error copying  color texture pixels." << std::endl;
		return false;
	}
	if (FAILED(hr))
	{
		std::cout << "Kinect Color stream NuiImageStreamReleaseFrame call failed." << std::endl;
		return false;
	}

	// NOTE: this cv::Mat constructor does not copy data — the returned
	// image aliases m_colorImagePixelBuffer.
	cv::Mat colorImg(m_ColorHeight, m_ColorWidth, CV_8UC4, m_colorImagePixelBuffer);
	image = colorImg;
	return true;
}