Example #1
KinectColor::KinectColor(IKinectSensor *m_pKinectSensor) {
	width = new int();
	height = new int();
	bufferSize = new unsigned int();

	HRESULT hr;
	IColorFrameSource* pColorFrameSource = NULL;

	hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);

	if (SUCCEEDED(hr))
	{
		hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
	}

	IFrameDescription* pDescription = NULL;
	if (SUCCEEDED(hr))
	{
		hr = pColorFrameSource->get_FrameDescription(&pDescription);
	}

	if (SUCCEEDED(hr))
	{
		pDescription->get_Width(width);
		pDescription->get_Height(height);
		*bufferSize = *width * *height * 4 * sizeof(unsigned char); // BGRA: 4 bytes per pixel

		bufferMat = new cv::Mat(*height, *width, CV_8UC4);
		colorMat = new cv::Mat(HEIGHT, WIDTH, CV_8UC4);

		memset(&baseImage, 0, sizeof(BASEIMAGE));
		CreateXRGB8ColorData(&baseImage.ColorData);
		baseImage.MipMapCount = 0;
		handle = -1;
	}

	SafeRelease(pDescription);
	SafeRelease(pColorFrameSource);
}
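Every snippet in this collection calls a SafeRelease helper (or a lowercase safeRelease / RELEASE_PTR variant) that is never shown. A minimal sketch of the usual definition, assuming the common template form from the Kinect SDK samples:

// Sketch of the SafeRelease helper assumed by the examples:
// release a COM interface pointer if set, then null it so a later
// SafeRelease on the same pointer is harmless.
template<class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
	if (pInterfaceToRelease != NULL)
	{
		pInterfaceToRelease->Release();
		pInterfaceToRelease = NULL;
	}
}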
	void Microsoft2Grabber::BodyIndexFrameArrived(IBodyIndexFrameReference* pBodyIndexFrameReference) {
		IBodyIndexFrame* pBodyIndexFrame = NULL;
		HRESULT hr = pBodyIndexFrameReference->AcquireFrame(&pBodyIndexFrame);
		if(FAILED(hr))
			return;
		//cout << "got a body index frame" << endl;
		IFrameDescription* pBodyIndexFrameDescription = NULL;
		int nBodyIndexWidth = 0;
		int nBodyIndexHeight = 0;
		UINT nBodyIndexBufferSize = 0;
		BYTE *pBodyIndexBuffer = NULL;

		// get body index frame data
		if (SUCCEEDED(hr)) {
			hr = pBodyIndexFrame->get_FrameDescription(&pBodyIndexFrameDescription);
		}
		if (SUCCEEDED(hr)) {
			hr = pBodyIndexFrameDescription->get_Width(&nBodyIndexWidth);
		}
		if (SUCCEEDED(hr)) {
			hr = pBodyIndexFrameDescription->get_Height(&nBodyIndexHeight);
		}
		if (SUCCEEDED(hr)) {
			hr = pBodyIndexFrame->AccessUnderlyingBuffer(&nBodyIndexBufferSize, &pBodyIndexBuffer);            
		}
		SafeRelease(pBodyIndexFrameDescription);
		SafeRelease(pBodyIndexFrame);
	}
HRESULT KinectHDFaceGrabber::initDepthFrameReader()
{
	
	IDepthFrameSource* depthFrameSource = nullptr;
	
	HRESULT hr = m_pKinectSensor->get_DepthFrameSource(&depthFrameSource);
	
	IFrameDescription* frameDescription = nullptr;
	if (SUCCEEDED(hr)){
		hr = depthFrameSource->get_FrameDescription(&frameDescription);
	}

	if (SUCCEEDED(hr)){
		hr = frameDescription->get_Width(&m_depthWidth);
	}

	if (SUCCEEDED(hr)){
		hr = frameDescription->get_Height(&m_depthHeight);
	}

	if (SUCCEEDED(hr)){
		m_depthBuffer.resize(m_depthHeight * m_depthWidth);
	}

	SafeRelease(frameDescription);
	if (SUCCEEDED(hr)){
		hr = depthFrameSource->OpenReader(&m_pDepthFrameReader);
	}

	SafeRelease(depthFrameSource);
	return hr;
}
HRESULT KinectHDFaceGrabber::initColorFrameReader()
{
	IColorFrameSource* pColorFrameSource = nullptr;
	HRESULT hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
	
	if (SUCCEEDED(hr)){
		hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
	}

	IFrameDescription* pFrameDescription = nullptr;
	if (SUCCEEDED(hr))
	{
		hr = pColorFrameSource->get_FrameDescription(&pFrameDescription);
	}

	if (SUCCEEDED(hr))
	{
		hr = pFrameDescription->get_Width(&m_colorWidth);
	}

	if (SUCCEEDED(hr))
	{
		hr = pFrameDescription->get_Height(&m_colorHeight);
	}

	if (SUCCEEDED(hr)){
		m_colorBuffer.resize(m_colorHeight * m_colorWidth);
	}

	SafeRelease(pFrameDescription);
	SafeRelease(pColorFrameSource);
	
	return hr;
}
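Once initColorFrameReader() has opened m_pColorFrameReader, frames are typically polled once per update tick. A sketch of such a poll, assuming m_colorBuffer is a std::vector<RGBQUAD> (the method name updateColor is hypothetical):

// Hypothetical per-tick poll of the reader opened above.
void KinectHDFaceGrabber::updateColor()
{
	IColorFrame* pColorFrame = nullptr;
	HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
	if (SUCCEEDED(hr)){
		// m_colorBuffer was resized to m_colorWidth * m_colorHeight in initColorFrameReader()
		hr = pColorFrame->CopyConvertedFrameDataToArray(
			static_cast<UINT>(m_colorBuffer.size()) * sizeof(RGBQUAD),
			reinterpret_cast<BYTE*>(m_colorBuffer.data()),
			ColorImageFormat_Bgra);
	}
	SafeRelease(pColorFrame);
}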
Example #5
void KinectCapture::GetColorFrame(IMultiSourceFrame* pMultiFrame)
{
	IColorFrameReference* pColorFrameReference = NULL;
	IColorFrame* pColorFrame = NULL;
	pMultiFrame->get_ColorFrameReference(&pColorFrameReference);
	HRESULT hr = pColorFrameReference->AcquireFrame(&pColorFrame);

	if (SUCCEEDED(hr))
	{
		if (pColorRGBX == NULL)
		{
			IFrameDescription* pFrameDescription = NULL;
			hr = pColorFrame->get_FrameDescription(&pFrameDescription);
			hr = pFrameDescription->get_Width(&nColorFrameWidth);
			hr = pFrameDescription->get_Height(&nColorFrameHeight);
			pColorRGBX = new RGB[nColorFrameWidth * nColorFrameHeight];
			SafeRelease(pFrameDescription);
		}

		UINT nBufferSize = nColorFrameWidth * nColorFrameHeight * sizeof(RGB);
		hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pColorRGBX), ColorImageFormat_Bgra);
	}

	SafeRelease(pColorFrame);
	SafeRelease(pColorFrameReference);
}
Example #6
void KinectCapture::GetDepthFrame(IMultiSourceFrame* pMultiFrame)
{
	IDepthFrameReference* pDepthFrameReference = NULL;
	IDepthFrame* pDepthFrame = NULL;
	pMultiFrame->get_DepthFrameReference(&pDepthFrameReference);
	HRESULT hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);

	if (SUCCEEDED(hr))
	{
		if (pDepth == NULL)
		{
			IFrameDescription* pFrameDescription = NULL;
			hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
			pFrameDescription->get_Width(&nDepthFrameWidth);
			pFrameDescription->get_Height(&nDepthFrameHeight);
			pDepth = new UINT16[nDepthFrameHeight * nDepthFrameWidth];
			SafeRelease(pFrameDescription);
		}

		UINT nBufferSize = nDepthFrameHeight * nDepthFrameWidth;
		hr = pDepthFrame->CopyFrameDataToArray(nBufferSize, pDepth);
	}

	SafeRelease(pDepthFrame);
	SafeRelease(pDepthFrameReference);
}
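GetColorFrame() and GetDepthFrame() above are both fed from a single IMultiSourceFrame. A sketch of the acquisition step that would drive them (the reader member pMultiSourceFrameReader is an assumption):

// Hypothetical per-tick acquisition feeding the two methods above.
void KinectCapture::AcquireMultiFrame()
{
	IMultiSourceFrame* pMultiFrame = NULL;
	HRESULT hr = pMultiSourceFrameReader->AcquireLatestFrame(&pMultiFrame);
	if (SUCCEEDED(hr))
	{
		GetColorFrame(pMultiFrame);
		GetDepthFrame(pMultiFrame);
	}
	SafeRelease(pMultiFrame);
}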
cv::Mat capKinect::update(cv::Mat& depth_show)
{
	if (!m_pDepthReader) return cv::Mat();
	IDepthFrame* pDepthFrame = NULL;

	HRESULT hr = m_pDepthReader->AcquireLatestFrame(&pDepthFrame);
	cv::Mat re;

	if (SUCCEEDED(hr))
	{
		IFrameDescription* pFrameDescription = NULL;
		int nWidth = 0;
		int nHeight = 0;
		USHORT nDepthMinReliableDistance = 0;
		USHORT nDepthMaxDistance = 0;
		UINT nBufferSize = 0;
		UINT16 *pBuffer = NULL;
		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
		}
		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Width(&nWidth);
		}
		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Height(&nHeight);
		}
		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
		}
		if (SUCCEEDED(hr))
		{
			// To see the full range of depth (including the less reliable far-field depth),
			// set nDepthMaxDistance to the largest possible depth value.
			nDepthMaxDistance = USHRT_MAX;

			// Note: to filter by reliable depth distance instead, uncomment the following line.
			//// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
		}

		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
		}

		if (SUCCEEDED(hr))
		{
			re=capture(pBuffer, nWidth, nHeight, depth_show, nDepthMinReliableDistance, nDepthMaxDistance);
		}

		SafeRelease(pFrameDescription);
	}

	SafeRelease(pDepthFrame);
	return re;
}
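The capture() helper called above is not part of this snippet. A minimal sketch of what it might do, assuming it clones the raw 16-bit depth and writes an 8-bit preview into depth_show (the scaling and clamping choices are assumptions):

// Hypothetical sketch of capKinect::capture.
cv::Mat capKinect::capture(UINT16* pBuffer, int nWidth, int nHeight,
	cv::Mat& depth_show, USHORT nMinDepth, USHORT nMaxDepth)
{
	// Wrap the frame's buffer without copying, then clone so the data
	// outlives the IDepthFrame it came from.
	cv::Mat depth16(nHeight, nWidth, CV_16UC1, pBuffer);
	cv::Mat raw = depth16.clone();

	// Zero out values below the sensor's reliable minimum.
	raw.setTo(0, raw < nMinDepth);

	// Scale [0, nMaxDepth] into a displayable 8-bit range.
	raw.convertTo(depth_show, CV_8U, 255.0 / nMaxDepth);
	return raw;
}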
IFrameDescription* Kinect2StreamImpl::getFrameDescription(OniSensorType sensorType)
{
  if (!m_pKinectSensor) {
    return NULL;
  }

  IFrameDescription* frameDescription = NULL;
  if (sensorType == ONI_SENSOR_COLOR) {
    IColorFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_ColorFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->get_FrameDescription(&frameDescription);
      if (FAILED(hr) && frameDescription) {
        frameDescription->Release();
        frameDescription = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }
  else if (sensorType == ONI_SENSOR_DEPTH) {
    IDepthFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_DepthFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->get_FrameDescription(&frameDescription);
      if (FAILED(hr) && frameDescription) {
        frameDescription->Release();
        frameDescription = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }
  else { // ONI_SENSOR_IR
    IInfraredFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_InfraredFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->get_FrameDescription(&frameDescription);
      if (FAILED(hr) && frameDescription) {
        frameDescription->Release();
        frameDescription = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }

  return frameDescription;
}
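The IFrameDescription returned by getFrameDescription() is owned by the caller, so every call site must Release it. A sketch of typical caller-side use (the variable names are illustrative):

// Caller-side use of getFrameDescription(); the caller must Release the result.
IFrameDescription* desc = getFrameDescription(ONI_SENSOR_DEPTH);
if (desc != NULL) {
	int width = 0, height = 0;
	desc->get_Width(&width);
	desc->get_Height(&height);
	desc->Release();
}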
Example #9
// Constructor
KinectV2::KinectV2()
{
  // Get the sensor
  if (sensor == NULL && GetDefaultKinectSensor(&sensor) == S_OK)
  {
    HRESULT hr;

    // Start using the sensor
    hr = sensor->Open();
    assert(hr == S_OK);

    // Set up reading of depth data
    IDepthFrameSource *depthSource;
    hr = sensor->get_DepthFrameSource(&depthSource);
    assert(hr == S_OK);
    hr = depthSource->OpenReader(&depthReader);
    assert(hr == S_OK);
    IFrameDescription *depthDescription;
    hr = depthSource->get_FrameDescription(&depthDescription);
    assert(hr == S_OK);
    depthSource->Release();

    // Get the size of the depth data
    depthDescription->get_Width(&depthWidth);
    depthDescription->get_Height(&depthHeight);
    depthDescription->Release();

    // Set up reading of color data
    IColorFrameSource *colorSource;
    hr = sensor->get_ColorFrameSource(&colorSource);
    assert(hr == S_OK);
    hr = colorSource->OpenReader(&colorReader);
    assert(hr == S_OK);
    IFrameDescription *colorDescription;
    hr = colorSource->get_FrameDescription(&colorDescription);
    assert(hr == S_OK);
    colorSource->Release();

    // Get the size of the color data
    colorDescription->get_Width(&colorWidth);
    colorDescription->get_Height(&colorHeight);
    colorDescription->Release();

    // Coordinate mapping
    hr = sensor->get_CoordinateMapper(&coordinateMapper);
    assert(hr == S_OK);

    // Compute depthCount and colorCount, then create the texture and buffer objects
    makeTexture();

    // Allocate temporary memory used to derive camera coordinates from the depth data
    position = new GLfloat[depthCount][3];

    // Allocate temporary memory used to convert the color data
    color = new GLubyte[colorCount * 4];
  }
}
	void Microsoft2Grabber::DepthFrameArrived(IDepthFrameReference* pDepthFrameReference) {
		IDepthFrame* pDepthFrame = NULL;
		HRESULT hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
		//HRESULT hr = pDepthFrameReference->AcquireLatestFrame(&pDepthFrame);
		if(FAILED(hr))
			return;
		//cout << "got a depth frame" << endl;
		INT64 nDepthTime = 0;
		IFrameDescription* pDepthFrameDescription = NULL;
		int nDepthWidth = 0;
		int nDepthHeight = 0;
		UINT nDepthBufferSize = 0;

		// get depth frame data
		hr = pDepthFrame->get_RelativeTime(&nDepthTime);
		if (SUCCEEDED(hr)) {
			hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
		}
		if (SUCCEEDED(hr)) {
			hr = pDepthFrameDescription->get_Width(&nDepthWidth);
		}
		if (SUCCEEDED(hr)) {
			hr = pDepthFrameDescription->get_Height(&nDepthHeight);
		}
		if (SUCCEEDED(hr)) {
			hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &m_pDepthBuffer);
			//WaitForSingleObject(hDepthMutex,INFINITE);
			Mat tmp = Mat(m_depthSize, DEPTH_PIXEL_TYPE, m_pDepthBuffer, Mat::AUTO_STEP);
			MatDepth depth_img = *((MatDepth*)&(tmp.clone()));
			m_depthTime = nDepthTime;
			if (depth_image_signal_->num_slots () > 0) {
				depth_image_signal_->operator()(depth_img);
			}
			if (num_slots<sig_cb_microsoft_point_cloud_rgba>() > 0 || all_data_signal_->num_slots() > 0 || image_depth_image_signal_->num_slots() > 0) {
				//rgb_sync_.add1 (depth_img, m_depthTime);
				imageDepthOnlyImageCallback(depth_img);
			}
			
			//ReleaseMutex(hDepthMutex);
		}
		SafeRelease(pDepthFrameDescription);
		SafeRelease(pDepthFrame);
	}
Example #11
bool BodyIndexStream::open()
{
    if (!m_Device->isOpen()) {
        ofLogWarning("ofxKinect2::BodyIndexStream") << "No ready Kinect2 found.";
        return false;
    }

    m_IsInvert = true;
    IBodyIndexFrameSource *frameSource = nullptr;
    HRESULT hr = E_FAIL;

    hr = m_Device->get().kinect2->get_BodyIndexFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
        hr = frameSource->OpenReader(&m_StreamHandle.bodyIndexFrameReader);

        if (SUCCEEDED(hr)) {
            IFrameDescription *frameDescription = nullptr;
            hr = frameSource->get_FrameDescription(&frameDescription);
            if (SUCCEEDED(hr)) {
                int resX = 0, resY = 0;
                hr = frameDescription->get_Width(&resX);
                hr = frameDescription->get_Height(&resY);
                m_Frame.mode.resolutionX = resX;
                m_Frame.mode.resolutionY = resY;
                m_Frame.width = resX;
                m_Frame.height = resY;
                m_DoubleBuffer.allocate(resX, resY, 4);

            }
            safeRelease(frameDescription);
        }
    }

    safeRelease(frameSource);
    if (FAILED(hr)) {
        ofLogWarning("ofxKinect2::BodyIndexStream") << "Can't open stream.";
        return false;
    }

    return Stream::open();
}
Example #12
		//----------
		void Color::update(IColorFrame * frame) {
			this->isFrameNewFlag = true;
			IFrameDescription * frameDescription = NULL;
			try {
				//allocate pixels and texture if we need to
				if (FAILED(frame->get_FrameDescription(&frameDescription))) {
					throw Exception("Failed to get frame description");
				}

				int width, height;
				if (FAILED(frameDescription->get_Width(&width)) || FAILED(frameDescription->get_Height(&height))) {
					throw Exception("Failed to get width and height of frame");
				}
				if (width != this->pixels.getWidth() || height != this->pixels.getHeight()) {
					this->pixels.allocate(width, height, OF_IMAGE_COLOR_ALPHA);
					this->texture.allocate(this->pixels);
				}

				//update local rgba image
				if (FAILED(frame->CopyConvertedFrameDataToArray(this->pixels.size(), this->pixels.getPixels(), ColorImageFormat_Rgba))) {
					throw Exception("Couldn't pull pixel buffer");
				}
				if (this->useTexture) {
					this->texture.loadData(this->pixels);
				}

				//update yuv
				if (this->yuvPixelsEnabled) {
					if (width != this->yuvPixels.getWidth() || height != this->yuvPixels.getHeight()) {
						this->yuvPixels.allocate(width, height, OF_PIXELS_YUY2);
					}
					if (FAILED(frame->CopyRawFrameDataToArray(this->yuvPixels.size(), this->yuvPixels.getPixels()))) {
						throw Exception("Couldn't pull raw YUV pixel buffer");
					}
				}

				//update field of view
				if (FAILED(frameDescription->get_HorizontalFieldOfView(&this->horizontalFieldOfView))) {
					throw Exception("Failed to get horizonal field of view");
				}
				if (FAILED(frameDescription->get_VerticalFieldOfView(&this->verticalFieldOfView))) {
					throw Exception("Failed to get vertical field of view");
				}
				if (FAILED(frameDescription->get_DiagonalFieldOfView(&this->diagonalFieldOfView))) {
					throw Exception("Failed to get diagonal field of view");
				}

				IColorCameraSettings * cameraSettings;
				if (FAILED(frame->get_ColorCameraSettings(&cameraSettings))) {
					throw Exception("Failed to get color camera settings");
				}
				cameraSettings->get_ExposureTime(&this->exposure);
				cameraSettings->get_FrameInterval(&this->frameInterval);
				cameraSettings->get_Gain(&this->gain);
				cameraSettings->get_Gamma(&this->gamma);
			} catch (std::exception & e) {
				OFXKINECTFORWINDOWS2_ERROR << e.what();
			}
			SafeRelease(frameDescription);
		}
XnDouble Kinect2StreamImpl::getVerticalFov()
{
  IFrameDescription* frameDescription = NULL;
  if (m_sensorType == ONI_SENSOR_DEPTH && m_imageRegistrationMode == ONI_IMAGE_REGISTRATION_DEPTH_TO_COLOR) {
    frameDescription = getFrameDescription(ONI_SENSOR_COLOR);
  }
  else {
    frameDescription = getFrameDescription(m_sensorType);
  }

  if (frameDescription == NULL) {
    return 0;
  }

  float fov;
  HRESULT hr = frameDescription->get_VerticalFieldOfView(&fov);
  frameDescription->Release();
  if (FAILED(hr)) {
    return 0;
  }
  return fov;
}
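The horizontal counterpart follows the same shape; a sketch, assuming the same registration-mode handling applies (get_HorizontalFieldOfView is the matching IFrameDescription getter):

XnDouble Kinect2StreamImpl::getHorizontalFov()
{
  IFrameDescription* frameDescription = NULL;
  if (m_sensorType == ONI_SENSOR_DEPTH && m_imageRegistrationMode == ONI_IMAGE_REGISTRATION_DEPTH_TO_COLOR) {
    frameDescription = getFrameDescription(ONI_SENSOR_COLOR);
  }
  else {
    frameDescription = getFrameDescription(m_sensorType);
  }

  if (frameDescription == NULL) {
    return 0;
  }

  float fov;
  HRESULT hr = frameDescription->get_HorizontalFieldOfView(&fov);
  frameDescription->Release();
  if (FAILED(hr)) {
    return 0;
  }
  return fov;
}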
Example #14
bool ColorStream::open()
{
    if (!m_Device->isOpen()) {
        ofLogWarning("ofxKinect2::ColorStream") << "No ready Kinect2 found.";
        return false;
    }

    IColorFrameSource *colorFrameSource = nullptr;
    HRESULT hr = m_Device->get().kinect2->get_ColorFrameSource(&colorFrameSource);

    if (SUCCEEDED(hr)) {
        hr = colorFrameSource->OpenReader(&m_StreamHandle.colorFrameReader);
    }

    IFrameDescription *colorFrameDescription = nullptr;
    if (SUCCEEDED(hr)) {
        hr = colorFrameSource->get_FrameDescription(&colorFrameDescription);
    }

    if (SUCCEEDED(hr)) {
        int resolutionX = 0, resolutionY = 0;
        hr = colorFrameDescription->get_Width(&resolutionX);
        hr = colorFrameDescription->get_Height(&resolutionY);
        m_Frame.mode.resolutionX = resolutionX;
        m_Frame.mode.resolutionY = resolutionY;
        m_Frame.width = resolutionX;
        m_Frame.height = resolutionY;
        m_DoubleBuffer.allocate(resolutionX, resolutionY, 4);

    }

    safeRelease(colorFrameDescription);
    safeRelease(colorFrameSource);
    if (FAILED(hr)) {
        ofLogWarning("ofxKinect2::ColorStream") << "Can't open stream.";
        return false;
    }

    return Stream::open();
}
void BodyIndexMulticaster::ProcessNewBodyIndexFrame(IBodyIndexFrame* frame)
{
	IFrameDescription* frameDescription = NULL;
	frame->get_FrameDescription(&frameDescription);

	int width = 0;
	int height = 0;
	frameDescription->get_Height(&height);
	frameDescription->get_Width(&width);

	UINT nBufferSize = height*width*sizeof(BYTE);

	UINT capacity;

	HRESULT hr = frame->AccessUnderlyingBuffer(&capacity, &pBuffer); if (!SUCCEEDED(hr)) return;

	if (pBuffer && (width == K4W2_BODY_INDEX_WIDTH) && (height == K4W2_BODY_INDEX_HEIGHT))
	{
		// send previous frame first

		// encode current frame, will be send in the next cycle
		BYTE* pInput	= pBuffer;
		BYTE* pOutput	= pScaledBuffer;

		const BYTE* pEnd = pInput + (width * height);

		while (pInput < pEnd)
		{
			BYTE index = *pInput;
			// Map body indices 0..5 to gray levels 40..240; the "no body" value 255
			// becomes -1 as a signed char and therefore maps to 0.
			*pOutput = ((signed char)index + 1)*40;
            
			++pOutput;
			++pInput;
		}
		gstSender.SendFrame((unsigned char*) pScaledBuffer, nBufferSize);
	}
	SafeRelease(frameDescription);
}
		//----------
		void Color::update() {
			CHECK_OPEN

			IColorFrame * frame = NULL;
			IFrameDescription * frameDescription = NULL;
			try {
				//acquire frame
				if (FAILED(this->reader->AcquireLatestFrame(&frame))) {
					return; // we often throw here when no new frame is available
				}

				//allocate pixels and texture if we need to
				if (FAILED(frame->get_FrameDescription(&frameDescription))) {
					throw Exception("Failed to get frame description");
				}

				int width, height;
				if (FAILED(frameDescription->get_Width(&width)) || FAILED(frameDescription->get_Height(&height))) {
					throw Exception("Failed to get width and height of frame");
				}
				if (width != this->pixels.getWidth() || height != this->pixels.getHeight()) {
					this->pixels.allocate(width, height, OF_IMAGE_COLOR_ALPHA);
					this->texture.allocate(this->pixels);
				}

				//update local assets
				if (FAILED(frame->CopyConvertedFrameDataToArray(this->pixels.size(), this->pixels.getPixels(), ColorImageFormat_Rgba))) {
					throw Exception("Couldn't pull pixel buffer");
				}
				if (this->useTexture) {
					this->texture.loadData(this->pixels);
				}

				//update field of view
				if (FAILED(frameDescription->get_HorizontalFieldOfView(&this->horizontalFieldOfView))) {
					throw Exception("Failed to get horizonal field of view");
				}
				if (FAILED(frameDescription->get_VerticalFieldOfView(&this->verticalFieldOfView))) {
					throw Exception("Failed to get vertical field of view");
				}
				if (FAILED(frameDescription->get_DiagonalFieldOfView(&this->diagonalFieldOfView))) {
					throw Exception("Failed to get diagonal field of view");
				}

				IColorCameraSettings * cameraSettings;
				if (FAILED(frame->get_ColorCameraSettings(&cameraSettings))) {
					throw Exception("Failed to get color camera settings");
				}
				cameraSettings->get_ExposureTime(&this->exposure);
				cameraSettings->get_FrameInterval(&this->frameInterval);
				cameraSettings->get_Gain(&this->gain);
				cameraSettings->get_Gamma(&this->gamma);
			} catch (std::exception & e) {
				OFXKINECTFORWINDOWS2_ERROR << e.what();
			}
			SafeRelease(frameDescription);
			SafeRelease(frame);
		}
int main(int argc, char** argv)
{
	// 1a. Get default Sensor
	cout << "Try to get default sensor" << endl;
	IKinectSensor* pSensor = nullptr;
	if (GetDefaultKinectSensor(&pSensor) != S_OK)
	{
		cerr << "Get Sensor failed" << endl;
		return -1;
	}

	// 1b. Open sensor
	cout << "Try to open sensor" << endl;
	if (pSensor->Open() != S_OK)
	{
		cerr << "Can't open sensor" << endl;
		return -1;
	}

	// 2a. Get frame source
	cout << "Try to get body index source" << endl;
	IBodyIndexFrameSource* pFrameSource = nullptr;
	if (pSensor->get_BodyIndexFrameSource(&pFrameSource) != S_OK)
	{
		cerr << "Can't get body index frame source" << endl;
		return -1;
	}

	// 2b. Get frame description
	cout << "get body index frame description" << endl;
	int		iWidth = 0;
	int		iHeight = 0;
	IFrameDescription* pFrameDescription = nullptr;
	if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
	{
		pFrameDescription->get_Width(&iWidth);
		pFrameDescription->get_Height(&iHeight);
		pFrameDescription->Release();
		pFrameDescription = nullptr;
	}

	// 3a. get frame reader
	cout << "Try to get body index frame reader" << endl;
	IBodyIndexFrameReader* pFrameReader = nullptr;
	if (pFrameSource->OpenReader(&pFrameReader) != S_OK)
	{
		cerr << "Can't get body index frame reader" << endl;
		return -1;
	}

	// 2c. release Frame source
	cout << "Release frame source" << endl;
	pFrameSource->Release();
	pFrameSource = nullptr;

	// Prepare OpenCV data
	cv::Mat	mImg(iHeight, iWidth, CV_8UC3);
	cv::namedWindow("Body Index Image");

	// color array
	cv::Vec3b aColorTable[7] = {
		cv::Vec3b(255,0,0),
		cv::Vec3b(0,255,0),
		cv::Vec3b(0,0,255),
		cv::Vec3b(255,255,0),
		cv::Vec3b(255,0,255),
		cv::Vec3b(0,255,255),
		cv::Vec3b(0,0,0),
	};

	// Enter main loop
	while (true)
	{
		// 4a. Get last frame
		IBodyIndexFrame* pFrame = nullptr;
		if (pFrameReader->AcquireLatestFrame(&pFrame) == S_OK)
		{
			// 4c. Fill OpenCV image
			UINT uSize = 0;
			BYTE* pBuffer = nullptr;
			if (pFrame->AccessUnderlyingBuffer(&uSize,&pBuffer) == S_OK)
			{
				for (int y = 0; y < iHeight; ++y)
				{
					for (int x = 0; x < iWidth; ++x)
					{
						int uBodyIdx = pBuffer[x + y * iWidth];
						if (uBodyIdx < 6)
							mImg.at<cv::Vec3b>(y, x) = aColorTable[uBodyIdx];
						else
							mImg.at<cv::Vec3b>(y, x) = aColorTable[6];
					}
				}
				cv::imshow("Body Index Image", mImg);
			}
			else
			{
				cerr << "Data access error" << endl;
			}

			// 4e. release frame
			pFrame->Release();
		}

		// 4f. check keyboard input
		if (cv::waitKey(30) == VK_ESCAPE){
			break;
		}
	}

	// 3b. release frame reader
	cout << "Release frame reader" << endl;
	pFrameReader->Release();
	pFrameReader = nullptr;

	// 1c. Close Sensor
	cout << "close sensor" << endl;
	pSensor->Close();

	// 1d. Release Sensor
	cout << "Release sensor" << endl;
	pSensor->Release();
	pSensor = nullptr;

	return 0;
}
int main(int argc, char** argv)
{
	// 1a. Get default Sensor
	cout << "Try to get default sensor" << endl;
	IKinectSensor* pSensor = nullptr;
	if (GetDefaultKinectSensor(&pSensor) != S_OK)
	{
		cerr << "Get Sensor failed" << endl;
	}
	else
	{
		// 1b. Open sensor
		cout << "Try to open sensor" << endl;
		if (pSensor->Open() != S_OK)
		{
			cerr << "Can't open sensor" << endl;
		}
		else
		{
			// 2a. Get frame source
			cout << "Try to get source" << endl;
			IDepthFrameSource* pFrameSource = nullptr;
			if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK)
			{
				cerr << "Can't get frame source" << endl;
			}
			else
			{
				// 2b. Get frame description
				int		iWidth = 0;
				int		iHeight = 0;
				IFrameDescription* pFrameDescription = nullptr;
				if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
				{
					pFrameDescription->get_Width(&iWidth);
					pFrameDescription->get_Height(&iHeight);
					pFrameDescription->Release();
					pFrameDescription = nullptr;
				}

				// 2c. get some depth-only metadata
				UINT16 uDepthMin = 0, uDepthMax = 0;
				pFrameSource->get_DepthMinReliableDistance(&uDepthMin);
				pFrameSource->get_DepthMaxReliableDistance(&uDepthMax);
				cout << "Reliable Distance: " << uDepthMin << " - " << uDepthMax << endl;

				// prepare OpenCV data
				cv::Mat mDepthImg(iHeight, iWidth, CV_16UC1);
				cv::Mat mImg8bit(iHeight, iWidth, CV_8UC1);
				cv::namedWindow( "Depth Map" );

				// 3a. get frame reader
				cout << "Try to get frame reader" << endl;
				IDepthFrameReader* pFrameReader = nullptr;
				if (pFrameSource->OpenReader(&pFrameReader) != S_OK)
				{
					cerr << "Can't get frame reader" << endl;
				}
				else
				{
					// Enter main loop
					cout << "Enter main loop" << endl;
					while (true)
					{
						// 4a. Get last frame
						IDepthFrame* pFrame = nullptr;
						if (pFrameReader->AcquireLatestFrame(&pFrame) == S_OK)
						{
							// 4c. copy the depth map to image
							if (pFrame->CopyFrameDataToArray(iWidth * iHeight, reinterpret_cast<UINT16*>(mDepthImg.data)) == S_OK)
							{
								// 4d. convert from 16bit to 8bit
								mDepthImg.convertTo(mImg8bit, CV_8U, 255.0f / uDepthMax);
								cv::imshow("Depth Map", mImg8bit);
							}
							else
							{
								cerr << "Data copy error" << endl;
							}

							// 4e. release frame
							pFrame->Release();
						}

						// 4f. check keyboard input
						if (cv::waitKey(30) == VK_ESCAPE){
							break;
						}
					}

					// 3b. release frame reader
					cout << "Release frame reader" << endl;
					pFrameReader->Release();
					pFrameReader = nullptr;
				}

				// 2d. release Frame source
				cout << "Release frame source" << endl;
				pFrameSource->Release();
				pFrameSource = nullptr;
			}

			// 1c. Close Sensor
			cout << "close sensor" << endl;
			pSensor->Close();
		}

		// 1d. Release Sensor
		cout << "Release sensor" << endl;
		pSensor->Release();
		pSensor = nullptr;
	}
	return 0;
}
Example #19
	void processColor() {
		if (!device) return;
		if (!m_pColorFrameReader) return;
		
		IColorFrame* pColorFrame = NULL;
		HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
		if (SUCCEEDED(hr)) {
			INT64 nTime = 0;
			IFrameDescription* pFrameDescription = NULL;
			int nWidth = 0;
			int nHeight = 0;
			ColorImageFormat imageFormat = ColorImageFormat_None;
			UINT nBufferSize = 0;
			RGBQUAD *src = NULL;

			hr = pColorFrame->get_RelativeTime(&nTime);
			if (SUCCEEDED(hr)) {
				hr = pColorFrame->get_FrameDescription(&pFrameDescription);
			}
			if (SUCCEEDED(hr)) {
				hr = pFrameDescription->get_Width(&nWidth);
			}
			if (SUCCEEDED(hr)) {
				hr = pFrameDescription->get_Height(&nHeight);
			}
			if (SUCCEEDED(hr)) {
				hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
			}

			if (imageFormat != ColorImageFormat_Bgra)
			{
				if (!rgb_buffer) {
					rgb_buffer = new RGBQUAD[nWidth * nHeight];
				}

				//post("image format %d", imageFormat);
				//error("not brga");
				nBufferSize = nWidth * nHeight * sizeof(RGBQUAD);
				hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(rgb_buffer), ColorImageFormat_Rgba);
				if (FAILED(hr)) {
					error("failed to convert image");
					return;
				}

				src = rgb_buffer;
			}


			hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&src));
			ARGB * dst = (ARGB *)rgb_mat.back;
			int cells = nWidth * nHeight;

			//if (align_depth_to_color) {
				for (int i = 0; i < cells; ++i) {
					dst[i].r = src[i].rgbRed;
					dst[i].g = src[i].rgbGreen;
					dst[i].b = src[i].rgbBlue;
				}
			/*}
			else {
				// align color to depth:
				//std::fill(dst, dst + cells, RGB(0, 0, 0));
				for (int i = 0; i < cells; ++i) {
					int c = colorCoordinates[i * 2];
					int r = colorCoordinates[i * 2 + 1];
					if (c >= 0 && c < KINECT_DEPTH_WIDTH
						&& r >= 0 && r < KINECT_DEPTH_HEIGHT) {
						// valid location: depth value:
						int idx = r*KINECT_DEPTH_WIDTH + c;
						dst[i].r = src[idx].r;
						dst[i].g = src[idx].g;
						dst[i].b = src[idx].b;
					}
				}
			}*/

			new_rgb_data = 1;

			// release per-frame COM objects
			SafeRelease(pFrameDescription);
		}
		SafeRelease(pColorFrame);
	}
Example #20
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open( );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IColorFrameSource* pColorSource;
	hResult = pSensor->get_ColorFrameSource( &pColorSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
		return -1;
	}

	IBodyFrameSource* pBodySource;
	hResult = pSensor->get_BodyFrameSource( &pBodySource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_BodyFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IColorFrameReader* pColorReader;
	hResult = pColorSource->OpenReader( &pColorReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	IBodyFrameReader* pBodyReader;
	hResult = pBodySource->OpenReader( &pBodyReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IBodyFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pColorSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 1920
	pDescription->get_Height( &height ); // 1080
	unsigned int bufferSize = width * height * 4 * sizeof( unsigned char );

	cv::Mat bufferMat( height, width, CV_8UC4 );
	cv::Mat bodyMat( height / 2, width / 2, CV_8UC4 );
	cv::namedWindow( "Body" );

	// Color Table
	cv::Vec3b color[BODY_COUNT];
	color[0] = cv::Vec3b( 255,   0,   0 );
	color[1] = cv::Vec3b(   0, 255,   0 );
	color[2] = cv::Vec3b(   0,   0, 255 );
	color[3] = cv::Vec3b( 255, 255,   0 );
	color[4] = cv::Vec3b( 255,   0, 255 );
	color[5] = cv::Vec3b(   0, 255, 255 );

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	while( 1 ){
		// Frame
		IColorFrame* pColorFrame = nullptr;
		hResult = pColorReader->AcquireLatestFrame( &pColorFrame );
		if( SUCCEEDED( hResult ) ){
			hResult = pColorFrame->CopyConvertedFrameDataToArray( bufferSize, reinterpret_cast<BYTE*>( bufferMat.data ), ColorImageFormat::ColorImageFormat_Bgra );
			if( SUCCEEDED( hResult ) ){
				cv::resize( bufferMat, bodyMat, cv::Size(), 0.5, 0.5 );
			}
		}
		//SafeRelease( pColorFrame );

		IBodyFrame* pBodyFrame = nullptr;
		hResult = pBodyReader->AcquireLatestFrame( &pBodyFrame );
		if( SUCCEEDED( hResult ) ){
			IBody* pBody[BODY_COUNT] = { 0 };
			hResult = pBodyFrame->GetAndRefreshBodyData( BODY_COUNT, pBody );
			if( SUCCEEDED( hResult ) ){
				for( int count = 0; count < BODY_COUNT; count++ ){
					BOOLEAN bTracked = false;
					hResult = pBody[count]->get_IsTracked( &bTracked );
					if( SUCCEEDED( hResult ) && bTracked ){
						Joint joint[JointType::JointType_Count];
						hResult = pBody[ count ]->GetJoints( JointType::JointType_Count, joint );
						if( SUCCEEDED( hResult ) ){
							// Left Hand State
							HandState leftHandState = HandState::HandState_Unknown;
							hResult = pBody[count]->get_HandLeftState( &leftHandState );
							if( SUCCEEDED( hResult ) ){
								ColorSpacePoint colorSpacePoint = { 0 };
								hResult = pCoordinateMapper->MapCameraPointToColorSpace( joint[JointType::JointType_HandLeft].Position, &colorSpacePoint );
								if( SUCCEEDED( hResult ) ){
									int x = static_cast<int>( colorSpacePoint.X );
									int y = static_cast<int>( colorSpacePoint.Y );
									if( ( x >= 0 ) && ( x < width ) && ( y >= 0 ) && ( y < height ) ){
										if( leftHandState == HandState::HandState_Open ){
											cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 128, 0 ), 5, CV_AA );
										}
										else if( leftHandState == HandState::HandState_Closed ){
											cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 0, 128 ), 5, CV_AA );
										}
										else if( leftHandState == HandState::HandState_Lasso ){
											cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 128, 128, 0 ), 5, CV_AA );
										}
									}
								}
							}

							// Right Hand State
							HandState rightHandState = HandState::HandState_Unknown;
							hResult = pBody[count]->get_HandRightState( &rightHandState );
							if( SUCCEEDED( hResult ) ){
								ColorSpacePoint colorSpacePoint = { 0 };
								hResult = pCoordinateMapper->MapCameraPointToColorSpace( joint[JointType::JointType_HandRight].Position, &colorSpacePoint );
								if( SUCCEEDED( hResult ) ){
									int x = static_cast<int>( colorSpacePoint.X );
									int y = static_cast<int>( colorSpacePoint.Y );
									if( ( x >= 0 ) && ( x < width ) && ( y >= 0 ) && ( y < height ) ){
										if( rightHandState == HandState::HandState_Open ){
											cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 128, 0 ), 5, CV_AA );
										}
										else if( rightHandState == HandState::HandState_Closed ){
											cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 0, 128 ), 5, CV_AA );
										}
										else if( rightHandState == HandState::HandState_Lasso ){
											cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 128, 128, 0 ), 5, CV_AA );
										}
									}
								}
							}

							// Joint
							for( int type = 0; type < JointType::JointType_Count; type++ ){
								ColorSpacePoint colorSpacePoint = { 0 };
								pCoordinateMapper->MapCameraPointToColorSpace( joint[type].Position, &colorSpacePoint );
								int x = static_cast<int>( colorSpacePoint.X );
								int y = static_cast<int>( colorSpacePoint.Y );
								if( ( x >= 0 ) && ( x < width ) && ( y >= 0 ) && ( y < height ) ){
									cv::circle( bufferMat, cv::Point( x, y ), 5, static_cast< cv::Scalar >( color[count] ), -1, CV_AA );
								}
							}
						}

						/*// Activity
						UINT capacity = 0;
						DetectionResult detectionResults = DetectionResult::DetectionResult_Unknown;
						hResult = pBody[count]->GetActivityDetectionResults( capacity, &detectionResults );
						if( SUCCEEDED( hResult ) ){
							if( detectionResults == DetectionResult::DetectionResult_Yes ){
								switch( capacity ){
									case Activity::Activity_EyeLeftClosed:
										std::cout << "Activity_EyeLeftClosed" << std::endl;
										break;
									case Activity::Activity_EyeRightClosed:
										std::cout << "Activity_EyeRightClosed" << std::endl;
										break;
									case Activity::Activity_MouthOpen:
										std::cout << "Activity_MouthOpen" << std::endl;
										break;
									case Activity::Activity_MouthMoved:
										std::cout << "Activity_MouthMoved" << std::endl;
										break;
									case Activity::Activity_LookingAway:
										std::cout << "Activity_LookingAway" << std::endl;
										break;
									default:
										break;
								}
							}
						}
						else{
							std::cerr << "Error : IBody::GetActivityDetectionResults()" << std::endl;
						}*/

						/*// Appearance
						capacity = 0;
						detectionResults = DetectionResult::DetectionResult_Unknown;
						hResult = pBody[count]->GetAppearanceDetectionResults( capacity, &detectionResults );
						if( SUCCEEDED( hResult ) ){
							if( detectionResults == DetectionResult::DetectionResult_Yes ){
								switch( capacity ){
									case Appearance::Appearance_WearingGlasses:
										std::cout << "Appearance_WearingGlasses" << std::endl;
										break;
									default:
										break;
								}
							}
						}
						else{
							std::cerr << "Error : IBody::GetAppearanceDetectionResults()" << std::endl;
						}*/

						/*// Expression
						capacity = 0;
						detectionResults = DetectionResult::DetectionResult_Unknown;
						hResult = pBody[count]->GetExpressionDetectionResults( capacity, &detectionResults );
						if( SUCCEEDED( hResult ) ){
							if( detectionResults == DetectionResult::DetectionResult_Yes ){
								switch( capacity ){
									case Expression::Expression_Happy:
										std::cout << "Expression_Happy" << std::endl;
										break;
									case Expression::Expression_Neutral:
										std::cout << "Expression_Neutral" << std::endl;
										break;
									default:
										break;
								}
							}
						}
						else{
							std::cerr << "Error : IBody::GetExpressionDetectionResults()" << std::endl;
						}*/

						// Lean
						PointF amount;
						hResult = pBody[count]->get_Lean( &amount );
						if( SUCCEEDED( hResult ) ){
							std::cout << "amount : " << amount.X << ", " << amount.Y << std::endl;
						}
					}
				}
				cv::resize( bufferMat, bodyMat, cv::Size(), 0.5, 0.5 );
			}
			for( int count = 0; count < BODY_COUNT; count++ ){
				SafeRelease( pBody[count] );
			}
		}
		//SafeRelease( pBodyFrame );

		SafeRelease( pColorFrame );
		SafeRelease( pBodyFrame );

		cv::imshow( "Body", bodyMat );

		if( cv::waitKey( 10 ) == VK_ESCAPE ){
			break;
		}
	}

	SafeRelease( pColorSource );
	SafeRelease( pBodySource );
	SafeRelease( pColorReader );
	SafeRelease( pBodyReader );
	SafeRelease( pDescription );
	SafeRelease( pCoordinateMapper );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
Example #21
int main()
{
	// 1. Sensor related code
	cout << "Try to get default sensor" << endl;
	{
		if (GetDefaultKinectSensor(&pSensor) != S_OK) {
			cerr << "Get Sensor failed" << endl;
			return -1;
		}

		cout << "Try to open sensor" << endl;
		if (pSensor->Open() != S_OK)	 {
			cerr << "Can't open sensor" << endl;
			return -1;
		}
	}

	// 2. Color related code
	cout << "Try to get color source" << endl;
	{
		// Get frame source
		IColorFrameSource* pFrameSource = nullptr;
		if (pSensor->get_ColorFrameSource(&pFrameSource) != S_OK) {
			cerr << "Can't get color frame source" << endl;
			return -1;
		}

		// Get frame description
		cout << "get color frame description" << endl;
		IFrameDescription* pFrameDescription = nullptr;
		if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)	{
			pFrameDescription->get_Width(&iColorWidth);
			pFrameDescription->get_Height(&iColorHeight);

			uColorPointNum = iColorWidth * iColorHeight;
			uColorBufferSize = uColorPointNum * 4 * sizeof(BYTE);

			pCSPoints = new CameraSpacePoint[uColorPointNum];
			pColorBuffer = new BYTE[4 * uColorPointNum];
			pFrameDescription->Release();
			pFrameDescription = nullptr;
		}

		// get frame reader
		cout << "Try to get color frame reader" << endl;
		if (pFrameSource->OpenReader(&pColorFrameReader) != S_OK) {
			cerr << "Can't get color frame reader" << endl;
			return -1;
		}

		// release Frame source
		cout << "Release frame source" << endl;
		pFrameSource->Release();
		pFrameSource = nullptr;
	}

	// 3. Depth related code
	cout << "Try to get depth source" << endl;
	{
		// Get frame source
		IDepthFrameSource* pFrameSource = nullptr;
		if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK) {
			cerr << "Can't get depth frame source" << endl;
			return -1;
		}

		// Get frame description
		cout << "get depth frame description" << endl;
		IFrameDescription* pFrameDescription = nullptr;
		if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK) {
			int	iDepthWidth = 0,
				iDepthHeight = 0;
			pFrameDescription->get_Width(&iDepthWidth);
			pFrameDescription->get_Height(&iDepthHeight);
			uDepthPointNum = iDepthWidth * iDepthHeight;
			pDepthBuffer = new UINT16[uDepthPointNum];
			pFrameDescription->Release();
			pFrameDescription = nullptr;
		}

		// get frame reader
		cout << "Try to get depth frame reader" << endl;
		if (pFrameSource->OpenReader(&pDepthFrameReader) != S_OK) {
			cerr << "Can't get depth frame reader" << endl;
			return -1;
		}

		// release Frame source
		cout << "Release frame source" << endl;
		pFrameSource->Release();
		pFrameSource = nullptr;
	}

	// 4. Coordinate Mapper
	if (pSensor->get_CoordinateMapper(&pCoordinateMapper) != S_OK) {
		cerr << "get_CoordinateMapper failed" << endl;
		return -1;
	}

	while (1) {
		idle();
		if ((int)pColorBuffer[0] != 0) {		
			capture_point();
		}
	}
	ExitFunction();

	return 0;
}
int main()
{

	// name and position windows
	cvNamedWindow("Color Probabilistic Tracking - Samples", 1);
	cvMoveWindow("Color Probabilistic Tracking - Samples", 0, 0);
	cvNamedWindow("Color Probabilistic Tracking - Result", 1);
	cvMoveWindow("Color Probabilistic Tracking - Result", 1000, 0);

	//control mouse
	setMouseCallback("Color Probabilistic Tracking - Samples", onMouse, 0);

	cv::setUseOptimized(true);

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor(&pSensor);
	if (FAILED(hResult)) {
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IColorFrameSource* pColorSource;
	hResult = pSensor->get_ColorFrameSource(&pColorSource);
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
		return -1;
	}

	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource(&pDepthSource);
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	/*IBodyIndexFrameSource* pBodyIndexSource;
	hResult = pSensor->get_BodyIndexFrameSource(&pBodyIndexSource);*/

	// Reader
	IColorFrameReader* pColorReader;
	hResult = pColorSource->OpenReader(&pColorReader);
	if (FAILED(hResult)) {
		std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader(&pDepthReader);
	if (FAILED(hResult)) {
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	//IBodyIndexFrameReader* pBodyIndexReader;//saferealease
	//hResult = pBodyIndexSource->OpenReader(&pBodyIndexReader);

	// Description
	IFrameDescription* pColorDescription;
	hResult = pColorSource->get_FrameDescription(&pColorDescription);
	if (FAILED(hResult)) {
		std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int colorWidth = 0;
	int colorHeight = 0;
	pColorDescription->get_Width(&colorWidth); // 1920
	pColorDescription->get_Height(&colorHeight); // 1080
	unsigned int colorBufferSize = colorWidth * colorHeight * 4 * sizeof(unsigned char);

	cv::Mat colorBufferMat(colorHeight, colorWidth, CV_8UC4);
	cv::Mat colorMat(colorHeight / 2, colorWidth / 2, CV_8UC4);
	cv::namedWindow("Color");

	RGBQUAD* m_pDepthRGBX;
	m_pDepthRGBX = new RGBQUAD[512 * 424];// create heap storage for color pixel data in RGBX format

	IFrameDescription* pDepthDescription;
	hResult = pDepthSource->get_FrameDescription(&pDepthDescription);
	if (FAILED(hResult)) {
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int depthWidth = 0;
	int depthHeight = 0;
	pDepthDescription->get_Width(&depthWidth); // 512
	pDepthDescription->get_Height(&depthHeight); // 424
	unsigned int depthBufferSize = depthWidth * depthHeight * sizeof(unsigned short);

	cv::Mat depthBufferMat(depthHeight, depthWidth, CV_16UC1);
	UINT16* pDepthBuffer = nullptr;
	cv::Mat depthMat(depthHeight, depthWidth, CV_8UC1);
	cv::namedWindow("Depth");

	//UINT32 nBodyIndexSize = 0;
	//BYTE* pIndexBuffer = nullptr;//This hasn't been safe realease yet

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper(&pCoordinateMapper);
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	cv::Mat coordinateMapperMat(depthHeight, depthWidth, CV_8UC4);
	cv::namedWindow("CoordinateMapper");

	unsigned short minDepth, maxDepth;
	pDepthSource->get_DepthMinReliableDistance(&minDepth);
	pDepthSource->get_DepthMaxReliableDistance(&maxDepth);




	while (1) {

		double t = (double)getTickCount();


		// Color Frame
		IColorFrame* pColorFrame = nullptr;
		hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
		if (SUCCEEDED(hResult)) {
			hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBufferSize, reinterpret_cast<BYTE*>(colorBufferMat.data), ColorImageFormat::ColorImageFormat_Bgra);
			if (SUCCEEDED(hResult)) {
				cv::resize(colorBufferMat, colorMat, cv::Size(), 0.5, 0.5);
			}
		}
		//SafeRelease( pColorFrame );

		// Depth Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);

		if (SUCCEEDED(hResult)) {
			hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, reinterpret_cast<UINT16**>(&depthBufferMat.data));

		}

		if (SUCCEEDED(hResult)) {
			hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, &pDepthBuffer);

			if (SUCCEEDED(hResult))
			{
				RGBQUAD* pRGBX = m_pDepthRGBX;

				// end pixel is start + width*height - 1
				const UINT16* pBufferEnd = pDepthBuffer + (512 * 424);
				int index = 0;

				while (pDepthBuffer < pBufferEnd)
				{
					USHORT depth = *pDepthBuffer;

					BYTE intensity = static_cast<BYTE>((depth >= 50) && (depth <= 5000) ? (depth % 256) : 0);

					pRGBX->rgbRed = intensity;
					pRGBX->rgbGreen = intensity;
					pRGBX->rgbBlue = intensity;

					depthData[index] = depth;

					++index;
					++pRGBX;
					++pDepthBuffer;
				}
			}
		}

		Mat DepthImage(424, 512, CV_8UC4, m_pDepthRGBX);
		//SafeRelease( pDepthFrame );

		// Mapping (Depth to Color)
		if (SUCCEEDED(hResult)) {
			std::vector<ColorSpacePoint> colorSpacePoints(depthWidth * depthHeight);
			hResult = pCoordinateMapper->MapDepthFrameToColorSpace(depthWidth * depthHeight, reinterpret_cast<UINT16*>(depthBufferMat.data), depthWidth * depthHeight, &colorSpacePoints[0]);
			if (SUCCEEDED(hResult)) {
				coordinateMapperMat = cv::Scalar(0, 0, 0, 0);
				for (int y = 0; y < depthHeight; y++) {
					for (int x = 0; x < depthWidth; x++) {
						unsigned int index = y * depthWidth + x;
						ColorSpacePoint point = colorSpacePoints[index];
						int colorX = static_cast<int>(std::floor(point.X + 0.5));
						int colorY = static_cast<int>(std::floor(point.Y + 0.5));
						unsigned short depth = depthBufferMat.at<unsigned short>(y, x);
						if ((colorX >= 0) && (colorX < colorWidth) && (colorY >= 0) && (colorY < colorHeight)/* && ( depth >= minDepth ) && ( depth <= maxDepth )*/) {
							coordinateMapperMat.at<cv::Vec4b>(y, x) = colorBufferMat.at<cv::Vec4b>(colorY, colorX);
						}
					}
				}
			}
		}

		if (SUCCEEDED(hResult))
		{

			//Particle Filter Start from here
			frame = &(IplImage)coordinateMapperMat; // transform the Mat into an IplImage (old C-API interop)

			if (image == 0)
			{
				// initialize variables and allocate buffers 
				image = cvCreateImage(cvGetSize(frame), 8, 4);//every pixel has 4 channels
				image->origin = frame->origin;

				result = cvCreateImage(cvGetSize(frame), 8, 4);
				result->origin = frame->origin;

				hsv = cvCreateImage(cvGetSize(frame), 8, 3);

				hue = cvCreateImage(cvGetSize(frame), 8, 1);

				sat = cvCreateImage(cvGetSize(frame), 8, 1);

				histimg_ref = cvCreateImage(cvGetSize(frame), 8, 3);
				histimg_ref->origin = frame->origin;
				cvZero(histimg_ref);

				histimg = cvCreateImage(cvGetSize(frame), 8, 3);
				histimg->origin = frame->origin;
				cvZero(histimg);

				bin_w = histimg_ref->width / BIN;
				bin_h = histimg_ref->height / BIN;


				data1.sample_t = reinterpret_cast<Region *> (malloc(sizeof(Region)* SAMPLE));
				data1.sample_t_1 = reinterpret_cast<Region *> (malloc(sizeof(Region)* SAMPLE));
				data1.sample_weight = reinterpret_cast<double *> (malloc(sizeof(double)* SAMPLE));
				data1.accum_weight = reinterpret_cast<double *> (malloc(sizeof(double)* SAMPLE));

			}

			cvCopy(frame, image);
			cvCopy(frame, result);
			cvCvtColor(image, hsv, CV_BGR2HSV);//image ~ hsv


			if (tracking)
			{
				//v_max = 0.0;

				cvSplit(hsv, hue, 0, 0, 0);//hsv->hue
				cvSplit(hsv, 0, 0, sat, 0);//hsv-saturation

				if (selecting)
				{

					// get the selected target area
					//ref_v_max = 0.0;
					area.width = abs(P_org.x - P_end.x);
					area.height = abs(P_org.y - P_end.y);
					area.x = MIN(P_org.x, P_end.x);
					area.y = MIN(P_org.y, P_end.y);

					cvZero(histimg_ref);

					// build reference histogram
					cvSetImageROI(hue, area);
					cvSetImageROI(sat, area);

					// zero reference histogram
					for (i = 0; i < BIN; i++)
						for (j = 0; j < BIN; j++)
							hist_ref[i][j] = 0.0;

					// calculate reference histogram
					for (i = 0; i < area.height; i++)
					{
						for (j = 0; j < area.width; j++)
						{
							im_hue = cvGet2D(hue, i, j);
							im_sat = cvGet2D(sat, i, j);
							k = int(im_hue.val[0] / STEP_HUE);
							h = int(im_sat.val[0] / STEP_SAT);
							hist_ref[k][h] = hist_ref[k][h] + 1.0;
						}
					}


					// rescale the value of each bin in the reference histogram 
					// and show it as an image
					for (i = 0; i < BIN; i++)
					{
						for (j = 0; j < BIN; j++)
						{
							hist_ref[i][j] = hist_ref[i][j] / (area.height*area.width);
						}
					}

					cvResetImageROI(hue);
					cvResetImageROI(sat);

					// initialize tracking and samples
					track_win = area;
					Initdata(track_win);
					track_win_last = track_win;

					// set up flag of tracking
					selecting = 0;

				}

				// sample propagation and weighting
				track_win = ImProcess(hue, sat, hist_ref, track_win_last);
				FrameNumber++;

				track_win_last = track_win;
				cvZero(histimg);

				// draw the one RED bounding box
				cvRectangle(image, cvPoint(track_win.x, track_win.y), cvPoint(track_win.x + track_win.width, track_win.y + track_win.height), CV_RGB(255, 0, 0), 2);
				printf("width = %d, height = %d\n", track_win.width, track_win.height);

				// save a snapshot of both images every 10th frame (frames 10..80)
				if (FrameNumber % 10 == 0 && FrameNumber / 10 >= 1 && FrameNumber / 10 <= 8)
				{
					char name[64];
					sprintf(name, "./imageout%d.jpg", FrameNumber / 10);
					cvSaveImage(name, image);
					sprintf(name, "./resultout%d.jpg", FrameNumber / 10);
					cvSaveImage(name, result);
				}

				//draw a same bounding box in DepthImage
				rectangle(DepthImage, track_win, CV_RGB(255, 0, 0), 2);

				//******************************************************Geodesic Distance***************************************************************************************
					//Point propagation and weight
					if (PointTrack == 1)
					{

						if (PointSelect == 1)//only visit once
						{

							// initialize tracking and samples
							for (int i = 0; i < SAMPLE; i++)
							{
								point[i].x_1 = P_track.x;
								point[i].y_1 = P_track.y;
								point[i].z_1 = depthData[P_track.x + P_track.y * 512];
								point[i].x_1_prime = 0.0;
								point[i].y_1_prime = 0.0;
							}
							
							refeFlag = 1;

							p_win = P_track;

							//p_transtart is the start point of the surface mesh
							P_transtart.x = track_win.x;
							P_transtart.y = track_win.y;

							PointSelect = 0;

						}

						//construct the graph(mesh)
						ConstructMesh(depthData, adjlist, P_transtart,track_win.width,track_win.height);

						//calculate shortest path
						vector<int> vertexDist;
						vertexDist.resize(track_win.width*track_win.height);

						ShortestPath(P_extre, adjlist,  vertexDist);

						cvCircle(image, P_extre, 3, CV_RGB(0, 255, 0),1);

						//generate the reference distance for comparison
						if (refeFlag > 0)
						{
							cvCircle(image, p_win, 3, CV_RGB(0, 0, 255), 1);

							int track = abs(P_transtart.x - P_track.x) + track_win.width * abs(P_transtart.y - P_track.y);

							referDistance = vertexDist[track];

							refeFlag = 0;
						}
						
						//samples propagation
						PredictPoint(p_win);

						//get the geodesic distance for each sample and
						//find the sample whose distance is most similar to the reference distance
						float Dist, AbsDist, WinDist, minAbsDist = 10000;
						int number,sum=0,count=0;

						for (int i = 0; i < SAMPLE; i++)
						{
							int t = abs(P_transtart.x - point[i].x) + track_win.width * abs(P_transtart.y - point[i].y);
							if (adjlist[t].v == false) { count++; continue; }

							int refer = abs(point[i].x - P_transtart.x) + track_win.width * abs(point[i].y - P_transtart.y);
							Dist = vertexDist[refer];

							AbsDist = fabs(referDistance - Dist);

							//point[i].SampleWeight = AbsDist;
							//point[i].AccumWeight = sum;
							//sum = sum + AbsDist;

							if (AbsDist < minAbsDist) { minAbsDist = AbsDist; number = i; WinDist = Dist; }

						}

						//for (int i = 0; i < SAMPLE; i++)
						//{
						//	point[i].SampleWeight = point[i].SampleWeight / sum;
						//	point[i].AccumWeight = point[i].AccumWeight / sum;
						//}

						printf("referDist = %f, winDist = %f, discardPoints = %d\n", referDistance, WinDist,count);
						
						p_win_last = p_win;
						p_win.x = point[number].x;
						p_win.y = point[number].y;

						//samples re-location
						float deltaX = p_win.x - p_win_last.x;
						float deltaY = p_win.y - p_win_last.y;

						UpdatePoint(number, deltaX, deltaY);

						cvCircle(image, p_win, 5, CV_RGB(0, 0, 0));
					}



				//	//****************************************************************************************************************************************
			}

			// if still selecting a target, show the RED selected area
			else cvRectangle(image, P_org, P_end, CV_RGB(255, 0, 0), 1);

		}

		imshow("Depth", DepthImage);
		cvShowImage("Color Probabilistic Tracking - Samples", image);
		cvShowImage("Color Probabilistic Tracking - Result", result);

		SafeRelease(pColorFrame);
		SafeRelease(pDepthFrame);
		//SafeRelease(pBodyIndexFrame);

		cv::imshow("Color", colorMat);
		cv::imshow("Depth", DepthImage);
		cv::imshow("CoordinateMapper", coordinateMapperMat);

		//END OF THE TIME POINT
		t = ((double)getTickCount() - t) / getTickFrequency();
		t = 1 / t;

		//cout << "FPS:" << t << "FrameNumber\n" << FrameNumebr<< endl;
		printf("FPS:%f Frame:%d \n\n", t, FrameNumber);


		if (cv::waitKey(30) == VK_ESCAPE) {
			break;
		}
	}

	SafeRelease(pColorSource);
	SafeRelease(pDepthSource);
	//SafeRelease(pBodyIndexSource);

	SafeRelease(pColorReader);
	SafeRelease(pDepthReader);
	//SafeRelease(pBodyIndexReader);

	SafeRelease(pColorDescription);
	SafeRelease(pDepthDescription);
	SafeRelease(pCoordinateMapper);
	if (pSensor) {
		pSensor->Close();
	}
	SafeRelease(pSensor);
	cv::destroyAllWindows();


	cvReleaseImage(&image);
	cvReleaseImage(&result);
	cvReleaseImage(&histimg_ref);
	cvReleaseImage(&histimg);
	cvReleaseImage(&hsv);
	cvReleaseImage(&hue);
	cvReleaseImage(&sat);
	cvDestroyWindow("Color Probabilistic Tracking - Samples");
	cvDestroyWindow("Color Probabilistic Tracking - Result");

	return 0;
}
Example #23
	void capture(Image::Ptr& pImage)
	{
		HRESULT hr;
		
		if (m_pMultiSourceFrameReader==nullptr)
		{
			camera->getContext().error("CameraKinectDevice::capture: m_pMultiSourceFrameReader is nullptr\n");
			// this is bad news - perhaps throw?
			return; // @@@
		}

		IMultiSourceFrame* pMultiSourceFrame = nullptr;
		IDepthFrame* pDepthFrame = nullptr;
		IColorFrame* pColorFrame = nullptr;

		const golem::MSecTmU32 waitStep = 1;
		golem::MSecTmU32 timeWaited = 0;
		golem::Sleep timer;
		while (FAILED(hr = m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame)))
		{
			// this is in CameraOpenNI, but suspect may be causing problem here
			// if (camera->isTerminating())	return;

			timer.msleep(waitStep);
			timeWaited += waitStep;
			if (timeWaited >= timeout)
			{
				camera->getContext().error("CameraKinectDevice::capture: failed to acquire frame within %d ms\n", timeout);
				// keep going - don't return with nothing; reset stopwatch @@@
				timeWaited = 0;
			}
		}
		
		const golem::SecTmReal systemTime1 = camera->getContext().getTimer().elapsed();

		if (SUCCEEDED(hr))
		{
			IDepthFrameReference* pDepthFrameReference = nullptr;

			hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);
			if (SUCCEEDED(hr))
			{
				hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
			}
			RELEASE_PTR(pDepthFrameReference);
		}

		if (SUCCEEDED(hr))
		{
			IColorFrameReference* pColorFrameReference = nullptr;

			hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference);
			if (SUCCEEDED(hr))
			{
				hr = pColorFrameReference->AcquireFrame(&pColorFrame);
			}
			RELEASE_PTR(pColorFrameReference);
		}

		if (SUCCEEDED(hr))
		{
			INT64 nDepthTime = 0;
			IFrameDescription* pDepthFrameDescription = nullptr;
			int nDepthWidth = 0;
			int nDepthHeight = 0;
			UINT nDepthBufferSize = 0;
			UINT16 *pDepthBuffer = nullptr;

			IFrameDescription* pColorFrameDescription = nullptr;
			int nColorWidth = 0;
			int nColorHeight = 0;
			ColorImageFormat imageFormat = ColorImageFormat_None;
			UINT nColorBufferSize = 0;
			RGBQUAD *pColorBuffer = nullptr;

			// get depth frame data

			hr = pDepthFrame->get_RelativeTime(&nDepthTime);

			if (SUCCEEDED(hr))
				hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);

			if (SUCCEEDED(hr))
				hr = pDepthFrameDescription->get_Width(&nDepthWidth);

			if (SUCCEEDED(hr))
				hr = pDepthFrameDescription->get_Height(&nDepthHeight);

			if (SUCCEEDED(hr))
				hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer);

			// get color frame data

			if (SUCCEEDED(hr))
				hr = pColorFrame->get_FrameDescription(&pColorFrameDescription);

			if (SUCCEEDED(hr))
				hr = pColorFrameDescription->get_Width(&nColorWidth);

			if (SUCCEEDED(hr))
				hr = pColorFrameDescription->get_Height(&nColorHeight);

			if (SUCCEEDED(hr))
				hr = pColorFrame->get_RawColorImageFormat(&imageFormat);

			if (SUCCEEDED(hr))
			{
				if (imageFormat == ColorImageFormat_Bgra)
				{
					hr = pColorFrame->AccessRawUnderlyingBuffer(&nColorBufferSize, reinterpret_cast<BYTE**>(&pColorBuffer));
				}
				else if (m_pColorRGBX)
				{
					pColorBuffer = m_pColorRGBX;
					nColorBufferSize = nColorWidth * nColorHeight * sizeof(RGBQUAD);
					hr = pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, reinterpret_cast<BYTE*>(pColorBuffer), ColorImageFormat_Bgra);
				}
				else
				{
					hr = E_FAIL;
				}
			}
			
			cv::Mat colorFrame;
			colorFrame.create(cColorHeight, cColorWidth, CV_8UC4);
			colorFrame.setTo(cv::Scalar(0, 0, 0, 0));
			// copy the color buffer into the cv::Mat
			memcpy(colorFrame.data, pColorBuffer, (nColorWidth * nColorHeight) * 4);
			// the Kinect delivers a mirrored image, so flip horizontally
			cv::flip(colorFrame, colorFrame, 1);
			// convert to the BGR format expected by pImage
			cv::Mat colorFrameT;
			cv::cvtColor(colorFrame, colorFrameT, CV_BGRA2BGR);
			
			if (mode & CameraKinect::MODE_COLOUR)
			{
				// initialize buffer
				if (pImage == nullptr)
					pImage.reset(new Image());
				pImage->reserve((int)1920, (int)1080);
				//printf("Before clonning frame\n");
				IplImage * img = cvCloneImage(&(IplImage)colorFrameT);
				cvCopy(img, pImage->image);
				cvReleaseImage(&img);
				//printf("After clonning frame\n");
				const golem::SecTmReal systemTime2 = camera->getContext().getTimer().elapsed();
				// set the timestamp
				pImage->timeStamp = REAL_HALF*(systemTime1 + systemTime2);
			}
			//camera->getContext().debug("After colour\n");
			// retain pointers to the beginning and end of the depth buffer
			const UINT16* pBufferStart = pDepthBuffer;
			const UINT16* pBufferEnd = pDepthBuffer + (nDepthWidth * nDepthHeight);
			// size of the depth buffer in bytes, for the memcpy below
			long depthSizeP = (pBufferEnd - pBufferStart) * sizeof(UINT16);

			cv::Mat depthFrame;
			depthFrame.create(cDepthHeight, cDepthWidth, CV_16UC1);
			depthFrame.setTo(cv::Scalar(0));
			memcpy(depthFrame.data, pBufferStart, depthSizeP);
			// the Kinect delivers a mirrored image, so flip horizontally
			cv::flip(depthFrame, depthFrame, 1);
			//camera->getContext().debug("After getting depth data\n");
			// depth mode: color data mapped into camera space (note the full 1920x1080 resolution)
			if (mode & CameraKinect::MODE_DEPTH && (std::stoi(this->property.format.c_str()) == 101))
			{
				//camera->getContext().debug("In depth mode\n");
				if (pImage == nullptr)
					pImage.reset(new Image());
				pImage->reserve((int)1920, (int)1080);

				pImage->cloud.reset(new PointSeq((uint32_t)property.width, (uint32_t)property.height));
			
				//Obtaining the pointcloud
				hasImageStream = false;
				float missingPoint = std::numeric_limits<float>::quiet_NaN();

				hr = m_pCoordinateMapper->MapColorFrameToCameraSpace(nDepthWidth * nDepthHeight, (UINT16*)pDepthBuffer, cColorHeight * cColorWidth, m_pCameraCoordinatesColor);

				golem::Point* pOutWidth = &pImage->cloud->front();
				CameraSpacePoint* pCamCWidth = m_pCameraCoordinatesColor;

				for (int y = 0; y < cColorHeight; ++y)
				{
					golem::Point* pOut = pOutWidth;
					CameraSpacePoint* pCamC = pCamCWidth;

					for (int x = 0; x < cColorWidth; ++x, pCamC++, ++pOut) {
						cv::Vec4b color;
						// test the float depth directly; truncating to int would wrongly discard points closer than 1 m
						if (pCamC->Z != 0) {
							pOut->x = pCamC->X;
							pOut->y = pCamC->Y;
							pOut->z = pCamC->Z;
						}
						else {
							pOut->x = pOut->y = pOut->z = missingPoint;
						}
						pOut->normal_x = pOut->normal_y = pOut->normal_z = golem::numeric_const<float>::ZERO;

						color = colorFrame.at<cv::Vec4b>(y, x);
						if (hasImageStream) {
							pOut->b = color[0];
							pOut->g = color[1];
							pOut->r = color[2];
							pOut->a = colour._rgba.a;
						}
						else {
							pOut->r = colour._rgba.r;
							pOut->g = colour._rgba.g;
							pOut->b = colour._rgba.b;
							pOut->a = colour._rgba.a;
						}
					}
					pCamCWidth += cColorWidth;
					pOutWidth += cColorWidth;
				}
			}

			//depth mode depth data mapped into camera space
			if (mode & CameraKinect::MODE_DEPTH && (std::stoi(this->property.format.c_str()) == 102))
			{
				if (pImage == nullptr)
					pImage.reset(new Image());
				//camera->getContext().debug("In depth mode\n");
				pImage->cloud.reset(new PointSeq((uint32_t)property.width, (uint32_t)property.height));
				//Obtaining the pointcloud
				hasImageStream = false;
				float missingPoint = std::numeric_limits<float>::quiet_NaN();

				hr = m_pCoordinateMapper->MapDepthFrameToCameraSpace(nDepthWidth * nDepthHeight, (UINT16*)pDepthBuffer, cDepthHeight * cDepthWidth, m_pCameraCoordinatesDepth);

				golem::Point* pOutWidth = &pImage->cloud->front();
				CameraSpacePoint* pCamDWidth = m_pCameraCoordinatesDepth;

				for (int y = 0; y < cDepthHeight; ++y)
				{
					golem::Point* pOut = pOutWidth;
					CameraSpacePoint* pCamC = pCamDWidth;

					for (int x = 0; x < cDepthWidth; ++x, pCamC++, ++pOut) {
						if (pCamC->Z != 0) {
							pOut->x = pCamC->X;
							pOut->y = pCamC->Y;
							pOut->z = pCamC->Z;
						}
						else {
							pOut->x = missingPoint;
							pOut->y = missingPoint;
							pOut->z = missingPoint;
						}
						pOut->normal_x = pOut->normal_y = pOut->normal_z = golem::numeric_const<float>::ZERO;

						// no image stream in this path, so paint the cloud with the configured colour
						pOut->r = colour._rgba.r;
						pOut->g = colour._rgba.g;
						pOut->b = colour._rgba.b;
						pOut->a = colour._rgba.a;
					}
					pCamDWidth += cDepthWidth;
					pOutWidth += cDepthWidth;
				}
				golem::Mat33 aMatrix;
				aMatrix.m11 = golem::Real(-1);
				aMatrix.m12 = golem::Real(0);
				aMatrix.m13 = golem::Real(0);
				aMatrix.m21 = golem::Real(0);
				aMatrix.m22 = golem::Real(-1);
				aMatrix.m23 = golem::Real(0);
				aMatrix.m31 = golem::Real(0);
				aMatrix.m32 = golem::Real(0);
				aMatrix.m33 = golem::Real(1);
				golem::Vec3 aVector;
				aVector.v1 = 0;
				aVector.v2 = 0;
				aVector.v3 = 0;
				golem::Mat34 aMatrix34;
				aMatrix34.R = aMatrix;
				aMatrix34.p = aVector;

				Cloud::PointSeqPtr pCloud = pImage->cloud;
				const Mat34 frame = Cloud::getSensorFrame(*pCloud);
				Cloud::transform(aMatrix34, *pCloud, *pCloud);
				Cloud::setSensorFrame(frame, *pCloud); // do not change the camera frame

				const golem::SecTmReal systemTime2 = camera->getContext().getTimer().elapsed();
				// set the timestamp
				pImage->timeStamp = REAL_HALF*(systemTime1 + systemTime2);
			}
			RELEASE_PTR(pDepthFrameDescription);
			RELEASE_PTR(pColorFrameDescription);
		}
		RELEASE_PTR(pDepthFrame);
		RELEASE_PTR(pColorFrame);
		RELEASE_PTR(pMultiSourceFrame);
	}
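Both mapping branches in capture() reduce to a single ICoordinateMapper call plus a per-point validity test. Below is a standalone sketch of the depth path; the names depthToCameraSpace and PointXYZ are placeholders, not from the source:

#include <Kinect.h>
#include <vector>
#include <limits>

struct PointXYZ { float x, y, z; };

// Map each depth pixel into camera space (metres); invalid pixels become NaN,
// mirroring the missingPoint handling in capture() above.
std::vector<PointXYZ> depthToCameraSpace(ICoordinateMapper* pMapper, const UINT16* pDepthBuffer, int width, int height)
{
	const UINT count = static_cast<UINT>(width * height);
	std::vector<CameraSpacePoint> camPoints(count);
	std::vector<PointXYZ> cloud(count);

	const HRESULT hr = pMapper->MapDepthFrameToCameraSpace(count, pDepthBuffer, count, camPoints.data());

	const float missingPoint = std::numeric_limits<float>::quiet_NaN();
	for (UINT i = 0; i < count; ++i) {
		// unmappable points come back with non-finite coordinates, so Z > 0 is the safe test
		if (SUCCEEDED(hr) && camPoints[i].Z > 0) {
			cloud[i] = { camPoints[i].X, camPoints[i].Y, camPoints[i].Z };
		}
		else {
			cloud[i] = { missingPoint, missingPoint, missingPoint };
		}
	}
	return cloud;
}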
Example #24
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource( &pDepthSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader( &pDepthReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pDepthSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424
	unsigned int bufferSize = width * height * sizeof( unsigned short );

	cv::Mat bufferMat( height, width, CV_16UC1 );
	cv::Mat depthMat( height, width, CV_8UC1 );
	cv::namedWindow( "Depth" );

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	// Create Reconstruction
	NUI_FUSION_RECONSTRUCTION_PARAMETERS reconstructionParameter;
	reconstructionParameter.voxelsPerMeter = 256;
	reconstructionParameter.voxelCountX = 512;
	reconstructionParameter.voxelCountY = 384;
	reconstructionParameter.voxelCountZ = 512;

	Matrix4 worldToCameraTransform;
	SetIdentityMatrix( worldToCameraTransform );
	INuiFusionReconstruction* pReconstruction;
	hResult = NuiFusionCreateReconstruction( &reconstructionParameter, NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, -1, &worldToCameraTransform, &pReconstruction );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateReconstruction()" << std::endl;
		return -1;
	}

	// Create Image Frame
	// Depth
	NUI_FUSION_IMAGE_FRAME* pDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// SmoothDepth
	NUI_FUSION_IMAGE_FRAME* pSmoothDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pSmoothDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// Point Cloud
	NUI_FUSION_IMAGE_FRAME* pPointCloudImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, width, height, nullptr, &pPointCloudImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( POINT_CLOUD )" << std::endl;
		return -1;
	}

	// Surface
	NUI_FUSION_IMAGE_FRAME* pSurfaceImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pSurfaceImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	// Normal
	NUI_FUSION_IMAGE_FRAME* pNormalImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pNormalImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	cv::namedWindow( "Surface" );
	cv::namedWindow( "Normal" );

	while( 1 ){
		// Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );

		if( SUCCEEDED( hResult ) ){
			hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
			if( SUCCEEDED( hResult ) ){
				bufferMat.convertTo( depthMat, CV_8U, -255.0f / 8000.0f, 255.0f );
				hResult = pReconstruction->DepthToDepthFloatFrame( reinterpret_cast<UINT16*>( bufferMat.data ), width * height * sizeof( UINT16 ), pDepthFloatImageFrame, NUI_FUSION_DEFAULT_MINIMUM_DEPTH/* 0.5[m] */, NUI_FUSION_DEFAULT_MAXIMUM_DEPTH/* 8.0[m] */, true );
				if( FAILED( hResult ) ){
					std::cerr << "Error :INuiFusionReconstruction::DepthToDepthFloatFrame()" << std::endl;
					return -1;
				}
			}
		}
		SafeRelease( pDepthFrame );

		// Smooth the Depth Image Frame
		hResult = pReconstruction->SmoothDepthFloatFrame( pDepthFloatImageFrame, pSmoothDepthFloatImageFrame, 1, 0.04f );
		if( FAILED( hResult ) ){
			std::cerr << "Error : INuiFusionReconstruction::SmoothDepthFloatFrame()" << std::endl;
			return -1;
		}

		// Reconstruction Process
		pReconstruction->GetCurrentWorldToCameraTransform( &worldToCameraTransform );
		hResult = pReconstruction->ProcessFrame( pSmoothDepthFloatImageFrame, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT, nullptr, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			static int errorCount = 0;
			errorCount++;
			if( errorCount >= 100 ) {
				errorCount = 0;
				ResetReconstruction( pReconstruction, &worldToCameraTransform );
			}
		}

		// Calculate Point Cloud
		hResult = pReconstruction->CalculatePointCloud( pPointCloudImageFrame, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			std::cerr << "Error : CalculatePointCloud" << std::endl;
			return -1;
		}

		// Shade the Point Cloud
		Matrix4 worldToBGRTransform = { 0.0f };
		worldToBGRTransform.M11 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountX;
		worldToBGRTransform.M22 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountY;
		worldToBGRTransform.M33 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountZ;
		worldToBGRTransform.M41 = 0.5f;
		worldToBGRTransform.M42 = 0.5f;
		worldToBGRTransform.M43 = 0.0f;
		worldToBGRTransform.M44 = 1.0f;

		hResult = NuiFusionShadePointCloud( pPointCloudImageFrame, &worldToCameraTransform, &worldToBGRTransform, pSurfaceImageFrame, pNormalImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiFusionShadePointCloud" << std::endl;
			return -1;
		}

		cv::Mat surfaceMat( height, width, CV_8UC4, pSurfaceImageFrame->pFrameBuffer->pBits );
		cv::Mat normalMat( height, width, CV_8UC4, pNormalImageFrame->pFrameBuffer->pBits );

		cv::imshow( "Depth", depthMat );
		cv::imshow( "Surface", surfaceMat );
		cv::imshow( "Normal", normalMat );

		int key = cv::waitKey( 30 );
		if( key == VK_ESCAPE ){
			break;
		}
		else if( key == 'r' ){
			ResetReconstruction( pReconstruction, &worldToCameraTransform );
		}
	}

	SafeRelease( pDepthSource );
	SafeRelease( pDepthReader );
	SafeRelease( pDescription );
	SafeRelease( pCoordinateMapper );
	SafeRelease( pReconstruction );
	NuiFusionReleaseImageFrame( pDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pSmoothDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pPointCloudImageFrame );
	NuiFusionReleaseImageFrame( pSurfaceImageFrame );
	NuiFusionReleaseImageFrame( pNormalImageFrame );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
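ResetReconstruction() is called above but not defined in this snippet. A minimal sketch consistent with how the Kinect Fusion samples typically implement it (the error text is illustrative):

#include <NuiKinectFusionApi.h>
#include <iostream>

void ResetReconstruction( INuiFusionReconstruction* pReconstruction, Matrix4* pWorldToCameraTransform )
{
	if( pReconstruction == nullptr || pWorldToCameraTransform == nullptr ){
		return;
	}
	// restart integration from an identity camera pose; keep the default volume transform
	SetIdentityMatrix( *pWorldToCameraTransform );
	HRESULT hResult = pReconstruction->ResetReconstruction( pWorldToCameraTransform, nullptr );
	if( FAILED( hResult ) ){
		std::cerr << "Error : INuiFusionReconstruction::ResetReconstruction()" << std::endl;
	}
}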
Example #25
void MyKinect2::Update()
{
    // acquire the 2D color image
    if (!m_pColorFrameReader)
    {
        return;
    }

    IColorFrame* pColorFrame = NULL;

    HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);

    if (SUCCEEDED(hr))
    {
        INT64 nTime = 0;
        IFrameDescription* pFrameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        ColorImageFormat imageFormat = ColorImageFormat_None;
        UINT nBufferSize = 0;


        hr = pColorFrame->get_RelativeTime(&nTime);

        if (SUCCEEDED(hr))
        {
            hr = pColorFrame->get_FrameDescription(&pFrameDescription);
        }

        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Width(&nWidth);
        }

        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Height(&nHeight);
        }

        if (SUCCEEDED(hr))
        {
            hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
        }


        if (SUCCEEDED(hr) && (nWidth == cColorWidth) && (nHeight == cColorHeight))
        {
            if (imageFormat == ColorImageFormat_Bgra)
            {
                hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&webcam.data));
            }
            else if (m_pColorRGBX)
            {
                nBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD);
                hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(webcam.data), ColorImageFormat_Bgra);
            }
            else
            {
                hr = E_FAIL;
            }
        }
        SafeRelease(pFrameDescription);
    }

    // acquire the skeleton data
    if (!m_pBodyFrameReader)
    {
        return;
    }

    IBodyFrame* pBodyFrame = NULL;

    hr = m_pBodyFrameReader->AcquireLatestFrame(&pBodyFrame);

    if (SUCCEEDED(hr))
    {
        INT64 nTime = 0;

        hr = pBodyFrame->get_RelativeTime(&nTime);

        IBody* ppBodies[BODY_COUNT] = {0};

        if (SUCCEEDED(hr))
        {
            hr = pBodyFrame->GetAndRefreshBodyData(_countof(ppBodies), ppBodies);
        }

        if (SUCCEEDED(hr))
        {
            ProcessBody(BODY_COUNT, ppBodies); // BODY_COUNT is a #define in Kinect.h equal to 6... perhaps set it to 1 later to avoid problems when retrieving joint positions
        }

        for (int i = 0; i < _countof(ppBodies); ++i)
        {
            SafeRelease(ppBodies[i]);
        }
    }

    SafeRelease(pBodyFrame);
    SafeRelease(pColorFrame);
}
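ProcessBody() is invoked above but not shown. A hypothetical minimal version that only reads tracked joints; the real method likely does more:

void MyKinect2::ProcessBody(int nBodyCount, IBody** ppBodies)
{
	for (int i = 0; i < nBodyCount; ++i)
	{
		IBody* pBody = ppBodies[i];
		if (pBody == NULL)
		{
			continue;
		}

		BOOLEAN bTracked = FALSE;
		if (FAILED(pBody->get_IsTracked(&bTracked)) || !bTracked)
		{
			continue;
		}

		Joint joints[JointType_Count];
		if (SUCCEEDED(pBody->GetJoints(_countof(joints), joints)))
		{
			// e.g. the right-hand position in camera space (metres)
			const CameraSpacePoint handRight = joints[JointType_HandRight].Position;
			// ... use handRight.X / handRight.Y / handRight.Z ...
		}
	}
}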
bool MyKinect::Process_DepthFrame(IMultiSourceFrame * pMultiSourceFrame, bool *returnbool)
{
	if (!pMultiSourceFrame) {
		*returnbool = false;
		return false;
	}
	 *returnbool = false;
	IDepthFrame * pDepthFrame = NULL;
	IDepthFrameReference *pDepthFrameReference = NULL;
	HRESULT hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);

	if (SUCCEEDED(hr))
	{
		hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
	}
	SafeRelease(pDepthFrameReference);
	/*if (!SUCCEEDED(hr))
	{
		cout << "    depth frame lost: " << ++depthLostFrames << endl;
	}*/
	

	if (SUCCEEDED(hr))
	{
		IFrameDescription * pFrameDescription = NULL;
		int nWidth = 0;
		int nHeight = 0;
	//	USHORT nDepthMinReliableDistance = 0;
		//USHORT nDepthMaxDistance = 0;
		UINT nBufferSize = 0;
		UINT16 *pBuffer = NULL;
		
		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
		}
		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Width(&nWidth);
		}
		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Height(&nHeight);
		}
		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->get_DepthMinReliableDistance(&cDepthMinReliableDistance);
		}
		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->get_DepthMaxReliableDistance(&cDepthMaxDistance);
		}

		if (SUCCEEDED(hr))
		{	// this only retrieves the underlying buffer pointer; nothing is copied
			hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);	// the reported size is in UINT16 elements, while memcpy counts bytes
		}

		if (SUCCEEDED(hr))
		{
			memcpy(m_pDepthBuffer, pBuffer, nBufferSize * sizeof(USHORT));

			*returnbool = true;
			//ProcessDepth(pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance);
		}

		SafeRelease(pFrameDescription);	// both the frame description and the frame must be released
	}
/*	else if (colorDepthFramesynchronization)	// the depth frame was lost but the color frame was processed, so the previous frame would have to be reused
	{	// not needed: the previous frame's coordinates were already used, so this would be redundant
	}
	*/
	SafeRelease(pDepthFrame);
	
	return *returnbool;

}
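A typical next step after Process_DepthFrame() is visualizing the 16-bit buffer. A sketch (not from the source; assumes OpenCV) using the same 0-8000 mm linear scaling as the Fusion example above:

#include <opencv2/opencv.hpp>

// Wrap the 16-bit depth buffer without copying and scale it to 8 bits for display:
// 0 mm maps to 255 and 8000 mm to 0, so nearer pixels appear brighter.
cv::Mat depthToGray(const unsigned short* pBuffer, int width, int height)
{
	cv::Mat depth16(height, width, CV_16UC1, const_cast<unsigned short*>(pBuffer));
	cv::Mat gray8;
	depth16.convertTo(gray8, CV_8U, -255.0 / 8000.0, 255.0);
	return gray8;	// convertTo allocates fresh memory, so returning is safe
}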
Example #27
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IBodyIndexFrameSource* pBodyIndexSource;
	hResult = pSensor->get_BodyIndexFrameSource( &pBodyIndexSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_BodyIndexFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IBodyIndexFrameReader* pBodyIndexReader;
	hResult = pBodyIndexSource->OpenReader( &pBodyIndexReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IBodyIndexFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pBodyIndexSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IBodyIndexFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424

	cv::Mat bodyIndexMat( height, width, CV_8UC3 );
	cv::namedWindow( "BodyIndex" );

	// Color Table
	cv::Vec3b color[BODY_COUNT];
	color[0] = cv::Vec3b( 255,   0,   0 );
	color[1] = cv::Vec3b(   0, 255,   0 );
	color[2] = cv::Vec3b(   0,   0, 255 );
	color[3] = cv::Vec3b( 255, 255,   0 );
	color[4] = cv::Vec3b( 255,   0, 255 );
	color[5] = cv::Vec3b(   0, 255, 255 );

	while( 1 ){
		// Frame
		IBodyIndexFrame* pBodyIndexFrame = nullptr;
		hResult = pBodyIndexReader->AcquireLatestFrame( &pBodyIndexFrame );
		if( SUCCEEDED( hResult ) ){
			unsigned int bufferSize = 0;
			unsigned char* buffer = nullptr;
			hResult = pBodyIndexFrame->AccessUnderlyingBuffer( &bufferSize, &buffer );
			if( SUCCEEDED( hResult ) ){
				for( int y = 0; y < height; y++ ){
					for( int x = 0; x < width; x++ ){
						unsigned int index = y * width + x;
						if( buffer[index] != 0xff ){
							bodyIndexMat.at<cv::Vec3b>( y, x ) = color[buffer[index]];
						}
						else{
							bodyIndexMat.at<cv::Vec3b>( y, x ) = cv::Vec3b( 0, 0, 0 );
						}
					}
				}
			}
		}
		SafeRelease( pBodyIndexFrame );

		cv::imshow( "BodyIndex", bodyIndexMat );

		if( cv::waitKey( 30 ) == VK_ESCAPE ){
			break;
		}
	}

	SafeRelease( pBodyIndexSource );
	SafeRelease( pBodyIndexReader );
	SafeRelease( pDescription );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
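The body-index buffer read above holds one byte per depth pixel: values 0-5 identify a tracked body and 0xff means no body, which is why the loop tests buffer[index] != 0xff. A small hypothetical helper that uses the same convention to count pixels per body:

#include <Kinect.h>	// for BODY_COUNT (6)
#include <array>

std::array<unsigned int, BODY_COUNT> countBodyPixels(const unsigned char* buffer, int width, int height)
{
	std::array<unsigned int, BODY_COUNT> counts = {};
	for (int i = 0; i < width * height; ++i) {
		if (buffer[i] != 0xff) {
			counts[buffer[i]]++;	// the buffer value doubles as the body index
		}
	}
	return counts;
}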
/// <summary>
/// Main processing function
/// </summary>
void CCoordinateMappingBasics::Update()
{
    if (!m_pMultiSourceFrameReader)
    {
        return;
    }

    IMultiSourceFrame* pMultiSourceFrame = NULL;
    IDepthFrame* pDepthFrame = NULL;
    IColorFrame* pColorFrame = NULL;
    IBodyIndexFrame* pBodyIndexFrame = NULL;

    IBodyFrame* pBodyFrame = NULL;

    HRESULT hr = m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame);

    if (SUCCEEDED(hr))
    {
        IDepthFrameReference* pDepthFrameReference = NULL;

        hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
        }

        SafeRelease(pDepthFrameReference);
    }

    if (SUCCEEDED(hr))
    {
        IColorFrameReference* pColorFrameReference = NULL;

        hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference);
        if (SUCCEEDED(hr))
        {
            hr = pColorFrameReference->AcquireFrame(&pColorFrame);
        }

        SafeRelease(pColorFrameReference);
    }

    if (SUCCEEDED(hr))
    {
        IBodyIndexFrameReference* pBodyIndexFrameReference = NULL;

        hr = pMultiSourceFrame->get_BodyIndexFrameReference(&pBodyIndexFrameReference);
        if (SUCCEEDED(hr))
        {
            hr = pBodyIndexFrameReference->AcquireFrame(&pBodyIndexFrame);
        }

        SafeRelease(pBodyIndexFrameReference);
    }

    if (SUCCEEDED(hr))
    {
        IBodyFrameReference* pBodyFrameReference = NULL;

        hr = pMultiSourceFrame->get_BodyFrameReference(&pBodyFrameReference);
        if (SUCCEEDED(hr))
        {
            hr = pBodyFrameReference->AcquireFrame(&pBodyFrame);
        }

		SafeRelease(pBodyFrameReference);
    }

    if (SUCCEEDED(hr))
    {
		// Depth
        INT64 nDepthTime = 0;
        IFrameDescription* pDepthFrameDescription = NULL;
        int nDepthWidth = 0;
        int nDepthHeight = 0;
        UINT nDepthBufferSize = 0;
        UINT16 *pDepthBuffer = NULL;

		// Color
        IFrameDescription* pColorFrameDescription = NULL;
        int nColorWidth = 0;
        int nColorHeight = 0;
        ColorImageFormat imageFormat = ColorImageFormat_None;
        UINT nColorBufferSize = 0;
        RGBQUAD *pColorBuffer = NULL;

		// BodyIndex
        IFrameDescription* pBodyIndexFrameDescription = NULL;
        int nBodyIndexWidth = 0;
        int nBodyIndexHeight = 0;
        UINT nBodyIndexBufferSize = 0;
        BYTE *pBodyIndexBuffer = NULL;

		// Body
		IBody* ppBodies[BODY_COUNT] = { 0 };

        // get depth frame data
        hr = pDepthFrame->get_RelativeTime(&nDepthTime);

		// Depth
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
        }

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrameDescription->get_Width(&nDepthWidth);
        }

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrameDescription->get_Height(&nDepthHeight);
        }

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer);            
        }

        // get color frame data
        if (SUCCEEDED(hr))
        {
            hr = pColorFrame->get_FrameDescription(&pColorFrameDescription);
        }

        if (SUCCEEDED(hr))
        {
            hr = pColorFrameDescription->get_Width(&nColorWidth);
        }

        if (SUCCEEDED(hr))
        {
            hr = pColorFrameDescription->get_Height(&nColorHeight);
        }

        if (SUCCEEDED(hr))
        {
            hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
        }

        if (SUCCEEDED(hr))
        {
            if (imageFormat == ColorImageFormat_Bgra)
            {
                hr = pColorFrame->AccessRawUnderlyingBuffer(&nColorBufferSize, reinterpret_cast<BYTE**>(&pColorBuffer));
            }
            else if (m_pColorRGBX)
            {
                pColorBuffer = m_pColorRGBX;
                nColorBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD);
                hr = pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, reinterpret_cast<BYTE*>(pColorBuffer), ColorImageFormat_Bgra);
            }
            else
            {
                hr = E_FAIL;
            }
        }

        // get body index frame data
        if (SUCCEEDED(hr))
        {
            hr = pBodyIndexFrame->get_FrameDescription(&pBodyIndexFrameDescription);
        }

        if (SUCCEEDED(hr))
        {
            hr = pBodyIndexFrameDescription->get_Width(&nBodyIndexWidth);
        }

        if (SUCCEEDED(hr))
        {
            hr = pBodyIndexFrameDescription->get_Height(&nBodyIndexHeight);
        }

        if (SUCCEEDED(hr))
        {
            hr = pBodyIndexFrame->AccessUnderlyingBuffer(&nBodyIndexBufferSize, &pBodyIndexBuffer);            
        }

		// get body frame data

		if (SUCCEEDED(hr))
		{
			hr = pBodyFrame->GetAndRefreshBodyData(_countof(ppBodies), ppBodies);
		}

        if (SUCCEEDED(hr))
        {
            ProcessFrame(nDepthTime, pDepthBuffer, nDepthWidth, nDepthHeight, 
                pColorBuffer, nColorWidth, nColorHeight,
				pBodyIndexBuffer, nBodyIndexWidth, nBodyIndexHeight, BODY_COUNT, ppBodies);
        }

        SafeRelease(pDepthFrameDescription);
        SafeRelease(pColorFrameDescription);
        SafeRelease(pBodyIndexFrameDescription);

		for (int i = 0; i < _countof(ppBodies); ++i)
		{
			SafeRelease(ppBodies[i]);
		}
	}

    SafeRelease(pDepthFrame);
    SafeRelease(pColorFrame);
    SafeRelease(pBodyIndexFrame);
    SafeRelease(pBodyFrame);
    SafeRelease(pMultiSourceFrame);
}
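ProcessFrame() above is defined elsewhere in the CoordinateMappingBasics sample; its central step is mapping every color pixel into depth space. A reduced sketch of that call (the wrapper name and parameters are placeholders):

// One DepthSpacePoint per color pixel; pixels with no depth correspondence are
// typically returned as negative infinity, which callers must check for.
HRESULT MapColorPixelsToDepthSpace(ICoordinateMapper* pCoordinateMapper,
	const UINT16* pDepthBuffer, int nDepthWidth, int nDepthHeight,
	int nColorWidth, int nColorHeight,
	DepthSpacePoint* pDepthCoordinates /* nColorWidth * nColorHeight entries */)
{
	return pCoordinateMapper->MapColorFrameToDepthSpace(
		nDepthWidth * nDepthHeight, pDepthBuffer,
		nColorWidth * nColorHeight, pDepthCoordinates);
}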
Example #29
	void KinectManager::Update(void)
	{
		if (!m_pColorFrameReader)
		{
			return;
		}

		IColorFrame* pColorFrame = NULL;

		HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);

		if (SUCCEEDED(hr))
		{
			INT64 nTime = 0;
			IFrameDescription* pFrameDescription = NULL;
			int nWidth = 0;
			int nHeight = 0;
			ColorImageFormat imageFormat = ColorImageFormat_None;

			hr = pColorFrame->get_RelativeTime(&nTime);

			if (SUCCEEDED(hr))
			{
				hr = pColorFrame->get_FrameDescription(&pFrameDescription);
			}

			if (SUCCEEDED(hr))
			{
				hr = pFrameDescription->get_Width(&nWidth);
			}

			if (SUCCEEDED(hr))
			{
				hr = pFrameDescription->get_Height(&nHeight);
			}

			if (SUCCEEDED(hr))
			{
				hr = pFrameDescription->get_BytesPerPixel(&color_frame_bytes_per_pixel);
			}

			if (SUCCEEDED(hr))
			{
				hr = pFrameDescription->get_LengthInPixels(&color_frame_length_in_pixels);
			}

			if (SUCCEEDED(hr))
			{
				hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
			}


			if (SUCCEEDED(hr))
			{
				if (imageFormat == ColorImageFormat_Rgba)
				{
					UINT nBufferSize = KINECT_STREAM_COLOR_WIDTH * KINECT_STREAM_COLOR_HEIGHT * color_frame_bytes_per_pixel;

					hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&m_pColorRGBX));
				}
				else if (m_pColorRGBX)
				{
					IFrameDescription* pRGBFrameDescription = NULL;
					pColorFrame->CreateFrameDescription(ColorImageFormat_Rgba, &pRGBFrameDescription);
					unsigned int clpp = 0;
					pRGBFrameDescription->get_BytesPerPixel(&clpp);
					unsigned int clip = 0;
					pRGBFrameDescription->get_LengthInPixels(&clip);
					color_frame_bytes_per_pixel = clpp;
					color_frame_length_in_pixels = clip;
					UINT nBufferSize = clip * clpp;
					// pass the destination buffer itself, not the address of the pointer variable
					hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(m_pColorRGBX), ColorImageFormat_Rgba);
					SafeRelease(pRGBFrameDescription);	// avoid leaking the temporary frame description
				}
				else
				{
					hr = E_FAIL;
				}
			}

			SafeRelease(pFrameDescription);
		}

		SafeRelease(pColorFrame);
	}
Example #30
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IColorFrameSource* pColorSource;
	hResult = pSensor->get_ColorFrameSource( &pColorSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IColorFrameReader* pColorReader;
	hResult = pColorSource->OpenReader( &pColorReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pColorSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 1920
	pDescription->get_Height( &height ); // 1080
	unsigned int bufferSize = width * height * 4 * sizeof( unsigned char );

	cv::Mat bufferMat( height, width, CV_8UC4 );
	cv::Mat colorMat( height / 2, width / 2, CV_8UC4 );
	cv::namedWindow( "Color" );

	// Subscribe Handle
	WAITABLE_HANDLE hColorWaitable;
	hResult = pColorReader->SubscribeFrameArrived( &hColorWaitable );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IColorFrameReader::SubscribeFrameArrived()" << std::endl;
		return -1;
	}

	while( 1 ){
		// Waitable Events
		HANDLE hEvents[ ] = { reinterpret_cast<HANDLE>( hColorWaitable ) };
		WaitForMultipleObjects( ARRAYSIZE( hEvents ), hEvents, true, INFINITE );

		// Arrived Data
		IColorFrameArrivedEventArgs* pColorArgs = nullptr;
		hResult = pColorReader->GetFrameArrivedEventData( hColorWaitable, &pColorArgs );
		if( SUCCEEDED( hResult ) ){
			// Reference
			IColorFrameReference* pColorReference = nullptr;
			hResult = pColorArgs->get_FrameReference( &pColorReference );
			if( SUCCEEDED( hResult ) ){
				// Frame
				IColorFrame* pColorFrame = nullptr;
				hResult = pColorReference->AcquireFrame( &pColorFrame );
				if( SUCCEEDED( hResult ) ){
					hResult = pColorFrame->CopyConvertedFrameDataToArray( bufferSize, reinterpret_cast<BYTE*>( bufferMat.data ), ColorImageFormat::ColorImageFormat_Bgra );
					if( SUCCEEDED( hResult ) ){
						cv::resize( bufferMat, colorMat, cv::Size(), 0.5, 0.5 );
					}
				}
				SafeRelease( pColorFrame );
			}
			SafeRelease( pColorReference );
		}
		SafeRelease( pColorArgs );

		cv::imshow( "Color", colorMat );

		if( cv::waitKey( 30 ) == VK_ESCAPE ){
			break;
		}
	}

	SafeRelease( pColorSource );
	pColorReader->UnsubscribeFrameArrived( hColorWaitable );
	SafeRelease( pColorReader );
	SafeRelease( pDescription );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}