HRESULT capKinect::initKinect()
{
	HRESULT hr;

	hr = GetDefaultKinectSensor(&m_pKinect);

	if (FAILED(hr)) return hr;
	if (m_pKinect)
	{
		// sensor obtained; open it and create the depth reader
		IDepthFrameSource *pDepthFrameSource = NULL;
		hr = m_pKinect->Open();
		if (SUCCEEDED(hr))
		{
			hr = m_pKinect->get_DepthFrameSource(&pDepthFrameSource);
		}
		if (SUCCEEDED(hr))
		{
			hr = pDepthFrameSource->OpenReader(&m_pDepthReader);
		}
		if (pDepthFrameSource) SafeRelease(pDepthFrameSource);
	}
	if (!m_pKinect || FAILED(hr))
	{
		printf("No ready Kinect found!\n");
		return E_FAIL;
	}
	return hr;
}
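// Note: SafeRelease is used by most of the examples below but not defined
// in any of them. In the Microsoft Kinect SDK samples it is typically a
// small template along these lines (a sketch of the usual definition, not
// necessarily the exact one each project uses):
template<class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
	if (pInterfaceToRelease != NULL)
	{
		pInterfaceToRelease->Release();
		pInterfaceToRelease = NULL;
	}
}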
Example #2
bool DepthStream::open()
{
    if (!m_Device->isOpen()) {
        ofLogWarning("ofxKinect2::DepthStream") << "No ready Kinect2 found.";
        return false;
    }

    m_IsInvert = true;
    m_NearValue = 0;
    m_FarValue = 10000;
    IDepthFrameSource *depthFrameSource = nullptr;
    HRESULT hr = m_Device->get().kinect2->get_DepthFrameSource(&depthFrameSource);

    if (SUCCEEDED(hr)) {
        hr = depthFrameSource->OpenReader(&m_StreamHandle.depthFrameReader);
    }

    safeRelease(depthFrameSource);
    if (FAILED(hr)) {
        ofLogWarning("ofxKinect2::DepthStream") << "Can't open stream.";
        return false;
    }

    return Stream::open();
}
bool KinectInterface::start_device() {
	HRESULT hr;
	kinectSensor = nullptr;
	hr = GetDefaultKinectSensor(&kinectSensor);

	// initialize Kinect Sensor
	if (FAILED(hr) || !kinectSensor) {
		std::cout << "ERROR hr=" << hr << "; sensor=" << kinectSensor << std::endl;
		return false;
	}
	CHECKERROR(kinectSensor->Open())
	kinectSensor->get_CoordinateMapper(&mapper);

	// initialize depth frame reader
	//CHECKERROR(kinectSensor->OpenMultiSourceFrameReader(
	//	FrameSourceTypes::FrameSourceTypes_Depth | FrameSourceTypes::FrameSourceTypes_Color,
	//	&reader));
	IDepthFrameSource* depthFrameSource = nullptr;
	CHECKERROR(kinectSensor->get_DepthFrameSource(&depthFrameSource));
	CHECKERROR(depthFrameSource->OpenReader(&depthFrameReader));
	if (depthFrameSource) depthFrameSource->Release();

	IColorFrameSource* colorFrameSource = nullptr;
	CHECKERROR(kinectSensor->get_ColorFrameSource(&colorFrameSource));
	CHECKERROR(colorFrameSource->OpenReader(&colorFrameReader));
	if (colorFrameSource) colorFrameSource->Release();

	return true;
}
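// The CHECKERROR macro used above is not defined in this snippet. A
// plausible (hypothetical) definition matching its usage -- log and bail
// out of the bool-returning function on a failed HRESULT -- could be:
#define CHECKERROR(expr)                                       \
	if (FAILED(expr)) {                                        \
		std::cout << "ERROR: " #expr " failed" << std::endl;   \
		return false;                                          \
	}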
//Hao modified it
Kinect2Engine::Kinect2Engine(const char *calibFilename) : ImageSourceEngine(calibFilename)
{
	imageSize_d = Vector2i(512, 424);
	imageSize_rgb = Vector2i(1920, 1080);
	//imageSize_rgb = Vector2i(640, 480);

	nDepthMinReliableDistance = 500;
	nDepthMaxDistance = USHRT_MAX;
	
	data = new PrivateData();

	// create heap storage for color pixel data in RGBX format
	m_pColorRGBX = new RGBQUAD[imageSize_rgb.x * imageSize_rgb.y];

	colorAvailable = true;

	HRESULT hr;
	
	depthAvailable = true;

	hr = GetDefaultKinectSensor(&data->kinectSensor);
	if (FAILED(hr))
	{
		depthAvailable = false;
		printf("Kinect2: Failed to initialise depth camera\n");
		return;
	}

	if (data->kinectSensor)
	{
		IDepthFrameSource* pDepthFrameSource = NULL;
		IColorFrameSource* pColorFrameSource = NULL;

		hr = data->kinectSensor->Open();

		if (SUCCEEDED(hr))
			hr = data->kinectSensor->get_DepthFrameSource(&pDepthFrameSource);

		if (SUCCEEDED(hr))
			hr = pDepthFrameSource->OpenReader(&data->depthFrameReader);

		if (SUCCEEDED(hr))
			hr = data->kinectSensor->get_ColorFrameSource(&pColorFrameSource);

		if (SUCCEEDED(hr))
			hr = pColorFrameSource->OpenReader(&data->colorFrameReader);

		SafeRelease(pColorFrameSource);
		SafeRelease(pDepthFrameSource);

		this->calib.intrinsics_d.SetFrom(366.685, 366.685, 256.52, 208.1, 640, 480);
	}

	if (!data->kinectSensor || FAILED(hr))
	{
		depthAvailable = false;
		printf("Kinect2: No ready Kinect 2 sensor found\n");
		return;
	}
}
HRESULT KinectHDFaceGrabber::initDepthFrameReader()
{
	
	IDepthFrameSource* depthFrameSource = nullptr;
	
	HRESULT hr = m_pKinectSensor->get_DepthFrameSource(&depthFrameSource);
	
	IFrameDescription* frameDescription = nullptr;
	if (SUCCEEDED(hr)){
		hr = depthFrameSource->get_FrameDescription(&frameDescription);
	}

	if (SUCCEEDED(hr)){
		hr = frameDescription->get_Width(&m_depthWidth);
	}

	if (SUCCEEDED(hr)){
		hr = frameDescription->get_Height(&m_depthHeight);
	}

	if (SUCCEEDED(hr)){
		m_depthBuffer.resize(m_depthHeight * m_depthWidth);
	}

	SafeRelease(frameDescription);
	if (SUCCEEDED(hr)){
		hr = depthFrameSource->OpenReader(&m_pDepthFrameReader);
	}

	SafeRelease(depthFrameSource);
	return hr;
}
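// A minimal sketch of how the reader initialized above is typically polled
// once per frame (the method name updateDepthFrame is an assumption; the
// members m_pDepthFrameReader and m_depthBuffer are the ones set up above):
HRESULT KinectHDFaceGrabber::updateDepthFrame()
{
	IDepthFrame* depthFrame = nullptr;
	HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&depthFrame);
	if (SUCCEEDED(hr)){
		// copy the 16-bit depth values into the preallocated buffer
		hr = depthFrame->CopyFrameDataToArray(
			static_cast<UINT>(m_depthBuffer.size()), m_depthBuffer.data());
	}
	SafeRelease(depthFrame);
	return hr; // fails (e.g. E_PENDING) when no new frame is available yet
}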
Example #6
void KinectDevice::getFrameInformation() {
    // Get the frame information
    IDepthFrameSource *depthSrc;
    IColorFrameSource *colorSrc;
    IInfraredFrameSource *irSrc;
    ILongExposureInfraredFrameSource *hdirSrc;
    IBodyIndexFrameSource *indexSrc;

    IFrameDescription *depthDesc, *colorDesc, *irDesc, *hdirDesc, *indexDesc;

    if (_streams & Streams::DEPTH_STREAM) {
        _sensor->get_DepthFrameSource(&depthSrc);
        depthSrc->get_FrameDescription(&depthDesc);
        depthFrameInfo = FrameInfo(depthDesc);

        // Min/max vals
        depthSrc->get_DepthMinReliableDistance(&depthFrameInfo.minVal);
        depthSrc->get_DepthMaxReliableDistance(&depthFrameInfo.maxVal);

        // Allocate
        // arrays handed to shared_ptr need an array deleter, otherwise delete (not delete[]) is called
        depthData =         std::shared_ptr<uint16_t>(new uint16_t[depthFrameInfo.frameSize], std::default_delete<uint16_t[]>());
        prevDepthData =     std::shared_ptr<uint16_t>(new uint16_t[depthFrameInfo.frameSize], std::default_delete<uint16_t[]>());
    }
    else {
        depthData = nullptr;
        prevDepthData = nullptr;
    }

    if (_streams & Streams::COLOR_STREAM) {
        _sensor->get_ColorFrameSource(&colorSrc);
        colorSrc->get_FrameDescription(&colorDesc);
        colorFrameInfo = FrameInfo(colorDesc);

        colorData = std::shared_ptr<uint16_t>(new uint16_t[colorFrameInfo.frameSize], std::default_delete<uint16_t[]>());
    }
    if (_streams & Streams::IR_STREAM) {
        _sensor->get_InfraredFrameSource(&irSrc);
        irSrc->get_FrameDescription(&irDesc);
        irFrameInfo = FrameInfo(irDesc);

        irData = std::shared_ptr<uint16_t>(new uint16_t[irFrameInfo.frameSize], std::default_delete<uint16_t[]>());
    }
    if (_streams & Streams::HDIR_STREAM) {
        _sensor->get_LongExposureInfraredFrameSource(&hdirSrc);
        hdirSrc->get_FrameDescription(&hdirDesc);
        hdirFrameInfo = FrameInfo(hdirDesc);

        hdirData = std::shared_ptr<uint16_t>(new uint16_t[hdirFrameInfo.frameSize], std::default_delete<uint16_t[]>());
    }
    if (_streams & Streams::INDEX_STREAM) {
        _sensor->get_BodyIndexFrameSource(&indexSrc);
        indexSrc->get_FrameDescription(&indexDesc);
        indexFrameInfo = FrameInfo(indexDesc);

        indexData = std::shared_ptr<BYTE>(new BYTE[indexFrameInfo.frameSize], std::default_delete<BYTE[]>());
    }
}
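// FrameInfo is project-specific and not shown here; a hypothetical
// equivalent built from IFrameDescription might look like this (field
// names inferred from the usage above):
struct FrameInfo {
    int width = 0;
    int height = 0;
    unsigned int frameSize = 0; // in pixels, not bytes
    UINT16 minVal = 0;
    UINT16 maxVal = 0;

    FrameInfo() = default;
    explicit FrameInfo(IFrameDescription* desc)
    {
        desc->get_Width(&width);
        desc->get_Height(&height);
        frameSize = static_cast<unsigned int>(width) * height;
    }
};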
Example #7
// Constructor
KinectV2::KinectV2()
{
  // Acquire the sensor
  if (sensor == NULL && GetDefaultKinectSensor(&sensor) == S_OK)
  {
    HRESULT hr;

    // Start using the sensor
    hr = sensor->Open();
    assert(hr == S_OK);

    // Set up depth data reading
    IDepthFrameSource *depthSource;
    hr = sensor->get_DepthFrameSource(&depthSource);
    assert(hr == S_OK);
    hr = depthSource->OpenReader(&depthReader);
    assert(hr == S_OK);
    IFrameDescription *depthDescription;
    hr = depthSource->get_FrameDescription(&depthDescription);
    assert(hr == S_OK);
    depthSource->Release();

    // Get the size of the depth data
    depthDescription->get_Width(&depthWidth);
    depthDescription->get_Height(&depthHeight);
    depthDescription->Release();

    // Set up color data reading
    IColorFrameSource *colorSource;
    hr = sensor->get_ColorFrameSource(&colorSource);
    assert(hr == S_OK);
    hr = colorSource->OpenReader(&colorReader);
    assert(hr == S_OK);
    IFrameDescription *colorDescription;
    hr = colorSource->get_FrameDescription(&colorDescription);
    assert(hr == S_OK);
    colorSource->Release();

    // Get the size of the color data
    colorDescription->get_Width(&colorWidth);
    colorDescription->get_Height(&colorHeight);
    colorDescription->Release();

    // Coordinate mapping
    hr = sensor->get_CoordinateMapper(&coordinateMapper);
    assert(hr == S_OK);

    // Compute depthCount and colorCount, then create the texture and buffer objects
    makeTexture();

    // Allocate temporary memory used to derive camera coordinates from the depth data
    position = new GLfloat[depthCount][3];

    // Allocate temporary memory used to convert the color data
    color = new GLubyte[colorCount * 4];
  }
}
IFrameDescription* Kinect2StreamImpl::getFrameDescription(OniSensorType sensorType)
{
  if (!m_pKinectSensor) {
    return NULL;
  }

  IFrameDescription* frameDescription = NULL;
  if (sensorType == ONI_SENSOR_COLOR) {
    IColorFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_ColorFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->get_FrameDescription(&frameDescription);
      if (FAILED(hr) && frameDescription) {
        frameDescription->Release();
        frameDescription = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }
  else if (sensorType == ONI_SENSOR_DEPTH) {
    IDepthFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_DepthFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->get_FrameDescription(&frameDescription);
      if (FAILED(hr) && frameDescription) {
        frameDescription->Release();
        frameDescription = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }
  else { // ONI_SENSOR_IR
    IInfraredFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_InfraredFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->get_FrameDescription(&frameDescription);
      if (FAILED(hr) && frameDescription) {
        frameDescription->Release();
        frameDescription = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }

  return frameDescription;
}
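// The three branches above are identical apart from the frame-source type;
// a sketch of how they could be factored with a small template (same
// acquire/Release semantics, a member-function pointer chooses the source):
template <typename SourceT, typename GetSourceFn>
static IFrameDescription* getDescriptionFrom(IKinectSensor* pSensor, GetSourceFn getSource)
{
  SourceT* frameSource = NULL;
  IFrameDescription* frameDescription = NULL;
  if (SUCCEEDED((pSensor->*getSource)(&frameSource))) {
    HRESULT hr = frameSource->get_FrameDescription(&frameDescription);
    if (FAILED(hr) && frameDescription) {
      frameDescription->Release();
      frameDescription = NULL;
    }
  }
  if (frameSource) {
    frameSource->Release();
  }
  return frameDescription;
}
// e.g. getDescriptionFrom<IDepthFrameSource>(m_pKinectSensor, &IKinectSensor::get_DepthFrameSource)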
void Kinect2StreamImpl::openFrameReader()
{
  if (!m_pKinectSensor) {
    return;
  }

  if (m_sensorType == ONI_SENSOR_COLOR && !m_pFrameReader.color) {
    IColorFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_ColorFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->OpenReader(&m_pFrameReader.color);
      if (FAILED(hr) && m_pFrameReader.color) {
        m_pFrameReader.color->Release();
        m_pFrameReader.color = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }
  else if (m_sensorType == ONI_SENSOR_DEPTH && !m_pFrameReader.depth) {
    IDepthFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_DepthFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->OpenReader(&m_pFrameReader.depth);
      if (FAILED(hr) && m_pFrameReader.depth) {
        m_pFrameReader.depth->Release();
        m_pFrameReader.depth = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }
  else if(!m_pFrameReader.infrared) { // ONI_SENSOR_IR
    IInfraredFrameSource* frameSource = NULL;
    HRESULT hr = m_pKinectSensor->get_InfraredFrameSource(&frameSource);
    if (SUCCEEDED(hr)) {
      hr = frameSource->OpenReader(&m_pFrameReader.infrared);
      if (FAILED(hr) && m_pFrameReader.infrared) {
        m_pFrameReader.infrared->Release();
        m_pFrameReader.infrared = NULL;
      }
    }
    if (frameSource) {
      frameSource->Release();
    }
  }
}
int TextureManager::initializeDefaultSensor(char *errorMessage)
{
	HRESULT hr;

	sprintf_s(errorMessage, MAX_ERROR_LENGTH, "");

	hr = GetDefaultKinectSensor(&kinect_sensor_);
	if (FAILED(hr))
	{
		sprintf_s(errorMessage, MAX_ERROR_LENGTH, "Could not initialize Kinect");
		kinect_sensor_ = NULL;
		return -1;
	}

	if (kinect_sensor_)
	{
		// Initialize the Kinect and get the depth reader
		IDepthFrameSource* depthFrameSource = NULL;

		hr = kinect_sensor_->Open();

		if (SUCCEEDED(hr))
		{
			hr = kinect_sensor_->get_DepthFrameSource(&depthFrameSource);
		}

		if (SUCCEEDED(hr))
		{
			hr = depthFrameSource->OpenReader(&depth_frame_reader_);
		}

		if (depthFrameSource)
		{
			depthFrameSource->Release();
			depthFrameSource = NULL;
		}
	}

	if (!kinect_sensor_ || FAILED(hr))
	{	
		sprintf_s(errorMessage, MAX_ERROR_LENGTH,
			"Could not open kinect depth reader");
		return -1;
	}

	return 0;
}
Example #11
Kinect2Engine::Kinect2Engine(const char *calibFilename) : BaseImageSourceEngine(calibFilename)
{
	imageSize_d = Vector2i(512, 424);
	imageSize_rgb = Vector2i(640, 480);
	
	data = new PrivateData();

	colorAvailable = false;

	HRESULT hr;

	depthAvailable = true;

	hr = GetDefaultKinectSensor(&data->kinectSensor);
	if (FAILED(hr))
	{
		depthAvailable = false;
		printf("Kinect2: Failed to initialise depth camera\n");
		return;
	}

	if (data->kinectSensor)
	{
		IDepthFrameSource* pDepthFrameSource = NULL;

		hr = data->kinectSensor->Open();

		if (SUCCEEDED(hr))
			hr = data->kinectSensor->get_DepthFrameSource(&pDepthFrameSource);

		if (SUCCEEDED(hr))
			hr = pDepthFrameSource->OpenReader(&data->depthFrameReader);

		SafeRelease(pDepthFrameSource);
	}

	if (!data->kinectSensor || FAILED(hr))
	{
		depthAvailable = false;
		printf("Kinect2: No ready Kinect 2 sensor found\n");
		return;
	}
}
Example #12
/// <summary>
/// Initializes the default Kinect sensor
/// </summary>
/// <returns>indicates success or failure</returns>
HRESULT CDepthBasics::InitializeDefaultSensor()
{
    HRESULT hr;

    hr = GetDefaultKinectSensor(&m_pKinectSensor);
    if (FAILED(hr))
    {
        return hr;
    }

    if (m_pKinectSensor)
    {
        // Initialize the Kinect and get the depth reader
        IDepthFrameSource* pDepthFrameSource = NULL;
        
        hr = m_pKinectSensor->Open();

        if (SUCCEEDED(hr))
        {
            hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
        }

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
        }
        
        
        SafeRelease(pDepthFrameSource);
    }

    if (!m_pKinectSensor || FAILED(hr))
    {
        SetStatusMessage(L"No ready Kinect found!", 10000, true);
        return E_FAIL;
    }

    return hr;
}
		//----------
		void Depth::init(IKinectSensor * sensor) {
			this->reader = NULL;
			try {
				IDepthFrameSource * source = NULL;

				if (FAILED(sensor->get_DepthFrameSource(& source))) {
					throw(Exception("Failed to initialise Depth source"));
				}

				if (FAILED(source->OpenReader(& this->reader))) {
					throw(Exception("Failed to initialise Depth reader"));
				}

				SafeRelease(source);

				if (FAILED(sensor->get_CoordinateMapper(&this->coordinateMapper))) {
					throw(Exception("Failed to acquire coordinate mapper"));
				}
			} catch (std::exception &) {
				SafeRelease(this->reader);
				throw; // rethrow the original exception without slicing its type
			}
		}
Example #14
	HRESULT InitializeDefaultSensor()
	{
		HRESULT hr;

		hr = GetDefaultKinectSensor(&m_pKinectSensor);
		if (FAILED(hr))
		{
			return hr;
		}

		if (m_pKinectSensor)
		{
			// Initialize the Kinect and get coordinate mapper and the body reader
			IBodyFrameSource* pBodyFrameSource = NULL;

			hr = m_pKinectSensor->Open();

			if (SUCCEEDED(hr))
			{
				hr = m_pKinectSensor->get_CoordinateMapper(&m_pCoordinateMapper);
			}

			if (SUCCEEDED(hr))
			{
				hr = m_pKinectSensor->get_BodyFrameSource(&pBodyFrameSource);
			}

			if (SUCCEEDED(hr))
			{
				hr = pBodyFrameSource->OpenReader(&m_pBodyFrameReader);
			}

			SafeRelease(pBodyFrameSource);

			
			// get the color reader
			IColorFrameSource* pColorFrameSource = NULL;

			if (SUCCEEDED(hr))
			{

				hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
			}

			if (SUCCEEDED(hr))
			{
				hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
			} 

			SafeRelease(pColorFrameSource);

			
			// get the depth reader

			IDepthFrameSource* pDepthFrameSource = NULL;

			if (SUCCEEDED(hr))
			{
				hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
			}

			if (SUCCEEDED(hr))
			{
				hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
			}

			SafeRelease(pDepthFrameSource);

			//get the body frame index reader

			IBodyIndexFrameSource * pBodyIndexFrameSource = NULL;

			if(SUCCEEDED(hr)){
				hr = m_pKinectSensor->get_BodyIndexFrameSource(&pBodyIndexFrameSource);
			}

			if(SUCCEEDED(hr)){
				hr = pBodyIndexFrameSource->OpenReader(&m_pBodyIndexFrameReader);
			}

			SafeRelease(pBodyIndexFrameSource);

		}

		if (!m_pKinectSensor || FAILED(hr))
		{
			std::cout << "No ready Kinect found!";
			return E_FAIL;
		}

		return hr;
	}
int main(int argc, char** argv)
{
	// 1a. Get default Sensor
	cout << "Try to get default sensor" << endl;
	IKinectSensor* pSensor = nullptr;
	if (GetDefaultKinectSensor(&pSensor) != S_OK)
	{
		cerr << "Get Sensor failed" << endl;
	}
	else
	{
		// 1b. Open sensor
		cout << "Try to open sensor" << endl;
		if (pSensor->Open() != S_OK)
		{
			cerr << "Can't open sensor" << endl;
		}
		else
		{
			// 2a. Get frame source
			cout << "Try to get source" << endl;
			IDepthFrameSource* pFrameSource = nullptr;
			if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK)
			{
				cerr << "Can't get frame source" << endl;
			}
			else
			{
				// 2b. Get frame description
				int		iWidth = 0;
				int		iHeight = 0;
				IFrameDescription* pFrameDescription = nullptr;
				if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
				{
					pFrameDescription->get_Width(&iWidth);
					pFrameDescription->get_Height(&iHeight);
					pFrameDescription->Release();
					pFrameDescription = nullptr;
				}

				// 2c. get some depth-only metadata
				UINT16 uDepthMin = 0, uDepthMax = 0;
				pFrameSource->get_DepthMinReliableDistance(&uDepthMin);
				pFrameSource->get_DepthMaxReliableDistance(&uDepthMax);
				cout << "Reliable Distance: " << uDepthMin << " - " << uDepthMax << endl;

				// prepare OpenCV
				cv::Mat mDepthImg(iHeight, iWidth, CV_16UC1);
				cv::Mat mImg8bit(iHeight, iWidth, CV_8UC1);
				cv::namedWindow( "Depth Map" );

				// 3a. get frame reader
				cout << "Try to get frame reader" << endl;
				IDepthFrameReader* pFrameReader = nullptr;
				if (pFrameSource->OpenReader(&pFrameReader) != S_OK)
				{
					cerr << "Can't get frame reader" << endl;
				}
				else
				{
					// Enter main loop
					cout << "Enter main loop" << endl;
					while (true)
					{
						// 4a. Get last frame
						IDepthFrame* pFrame = nullptr;
						if (pFrameReader->AcquireLatestFrame(&pFrame) == S_OK)
						{
							// 4c. copy the depth map to image
							if (pFrame->CopyFrameDataToArray(iWidth * iHeight, reinterpret_cast<UINT16*>(mDepthImg.data)) == S_OK)
							{
								// 4d. convert from 16bit to 8bit
								mDepthImg.convertTo(mImg8bit, CV_8U, 255.0f / uDepthMax);
								cv::imshow("Depth Map", mImg8bit);
							}
							else
							{
								cerr << "Data copy error" << endl;
							}

							// 4e. release frame
							pFrame->Release();
						}

						// 4f. check keyboard input
						if (cv::waitKey(30) == VK_ESCAPE){
							break;
						}
					}

					// 3b. release frame reader
					cout << "Release frame reader" << endl;
					pFrameReader->Release();
					pFrameReader = nullptr;
				}

				// 2d. release Frame source
				cout << "Release frame source" << endl;
				pFrameSource->Release();
				pFrameSource = nullptr;
			}

			// 1c. Close Sensor
			cout << "close sensor" << endl;
			pSensor->Close();
		}

		// 1d. Release Sensor
		cout << "Release sensor" << endl;
		pSensor->Release();
		pSensor = nullptr;
	}
	return 0;
}
Example #16
int main()
{
	// 1. Sensor related code
	cout << "Try to get default sensor" << endl;
	{
		if (GetDefaultKinectSensor(&pSensor) != S_OK) {
			cerr << "Get Sensor failed" << endl;
			return -1;
		}

		cout << "Try to open sensor" << endl;
		if (pSensor->Open() != S_OK)	 {
			cerr << "Can't open sensor" << endl;
			return -1;
		}
	}

	// 2. Color related code
	cout << "Try to get color source" << endl;
	{
		// Get frame source
		IColorFrameSource* pFrameSource = nullptr;
		if (pSensor->get_ColorFrameSource(&pFrameSource) != S_OK) {
			cerr << "Can't get color frame source" << endl;
			return -1;
		}

		// Get frame description
		cout << "get color frame description" << endl;
		IFrameDescription* pFrameDescription = nullptr;
		if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)	{
			pFrameDescription->get_Width(&iColorWidth);
			pFrameDescription->get_Height(&iColorHeight);

			uColorPointNum = iColorWidth * iColorHeight;
			uColorBufferSize = uColorPointNum * 4 * sizeof(BYTE);

			pCSPoints = new CameraSpacePoint[uColorPointNum];
			pColorBuffer = new BYTE[4 * uColorPointNum];
		}
		pFrameDescription->Release();
		pFrameDescription = nullptr;

		// get frame reader
		cout << "Try to get color frame reader" << endl;
		if (pFrameSource->OpenReader(&pColorFrameReader) != S_OK) {
			cerr << "Can't get color frame reader" << endl;
			return -1;
		}

		// release Frame source
		cout << "Release frame source" << endl;
		pFrameSource->Release();
		pFrameSource = nullptr;
	}

	// 3. Depth related code
	cout << "Try to get depth source" << endl;
	{
		// Get frame source
		IDepthFrameSource* pFrameSource = nullptr;
		if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK) {
			cerr << "Can't get depth frame source" << endl;
			return -1;
		}

		// Get frame description
		cout << "get depth frame description" << endl;
		IFrameDescription* pFrameDescription = nullptr;
		if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK) {
			int	iDepthWidth = 0,
				iDepthHeight = 0;
			pFrameDescription->get_Width(&iDepthWidth);
			pFrameDescription->get_Height(&iDepthHeight);
			uDepthPointNum = iDepthWidth * iDepthHeight;
			pDepthBuffer = new UINT16[uDepthPointNum];
		}
		pFrameDescription->Release();
		pFrameDescription = nullptr;

		// get frame reader
		cout << "Try to get depth frame reader" << endl;
		if (pFrameSource->OpenReader(&pDepthFrameReader) != S_OK) {
			cerr << "Can't get depth frame reader" << endl;
			return -1;
		}

		// release Frame source
		cout << "Release frame source" << endl;
		pFrameSource->Release();
		pFrameSource = nullptr;
	}

	// 4. Coordinate Mapper
	if (pSensor->get_CoordinateMapper(&pCoordinateMapper) != S_OK) {
		cerr << "get_CoordinateMapper failed" << endl;
		return -1;
	}

	while (1) {
		idle();
		if ((int)pColorBuffer[0] != 0) {		
			capture_point();
		}
	}
	ExitFunction();

	return 0;
}
int main()
{

	// name and position windows
	cvNamedWindow("Color Probabilistic Tracking - Samples", 1);
	cvMoveWindow("Color Probabilistic Tracking - Samples", 0, 0);
	cvNamedWindow("Color Probabilistic Tracking - Result", 1);
	cvMoveWindow("Color Probabilistic Tracking - Result", 1000, 0);

	//control mouse
	setMouseCallback("Color Probabilistic Tracking - Samples", onMouse, 0);

	cv::setUseOptimized(true);

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor(&pSensor);
	if (FAILED(hResult)) {
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IColorFrameSource* pColorSource;
	hResult = pSensor->get_ColorFrameSource(&pColorSource);
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
		return -1;
	}

	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource(&pDepthSource);
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	/*IBodyIndexFrameSource* pBodyIndexSource;
	hResult = pSensor->get_BodyIndexFrameSource(&pBodyIndexSource);*/

	// Reader
	IColorFrameReader* pColorReader;
	hResult = pColorSource->OpenReader(&pColorReader);
	if (FAILED(hResult)) {
		std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader(&pDepthReader);
	if (FAILED(hResult)) {
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	//IBodyIndexFrameReader* pBodyIndexReader; // needs SafeRelease
	//hResult = pBodyIndexSource->OpenReader(&pBodyIndexReader);

	// Description
	IFrameDescription* pColorDescription;
	hResult = pColorSource->get_FrameDescription(&pColorDescription);
	if (FAILED(hResult)) {
		std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int colorWidth = 0;
	int colorHeight = 0;
	pColorDescription->get_Width(&colorWidth); // 1920
	pColorDescription->get_Height(&colorHeight); // 1080
	unsigned int colorBufferSize = colorWidth * colorHeight * 4 * sizeof(unsigned char);

	cv::Mat colorBufferMat(colorHeight, colorWidth, CV_8UC4);
	cv::Mat colorMat(colorHeight / 2, colorWidth / 2, CV_8UC4);
	cv::namedWindow("Color");

	RGBQUAD* m_pDepthRGBX;
	m_pDepthRGBX = new RGBQUAD[512 * 424]; // create heap storage for depth pixel data in RGBX format

	IFrameDescription* pDepthDescription;
	hResult = pDepthSource->get_FrameDescription(&pDepthDescription);
	if (FAILED(hResult)) {
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int depthWidth = 0;
	int depthHeight = 0;
	pDepthDescription->get_Width(&depthWidth); // 512
	pDepthDescription->get_Height(&depthHeight); // 424
	unsigned int depthBufferSize = depthWidth * depthHeight * sizeof(unsigned short);

	cv::Mat depthBufferMat(depthHeight, depthWidth, CV_16UC1);
	UINT16* pDepthBuffer = nullptr;
	cv::Mat depthMat(depthHeight, depthWidth, CV_8UC1);
	cv::namedWindow("Depth");

	//UINT32 nBodyIndexSize = 0;
	//BYTE* pIndexBuffer = nullptr; // this hasn't been SafeReleased yet

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper(&pCoordinateMapper);
	if (FAILED(hResult)) {
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	cv::Mat coordinateMapperMat(depthHeight, depthWidth, CV_8UC4);
	cv::namedWindow("CoordinateMapper");

	unsigned short minDepth, maxDepth;
	pDepthSource->get_DepthMinReliableDistance(&minDepth);
	pDepthSource->get_DepthMaxReliableDistance(&maxDepth);




	while (1) {

		double t = (double)getTickCount();


		// Color Frame
		IColorFrame* pColorFrame = nullptr;
		hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
		if (SUCCEEDED(hResult)) {
			hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBufferSize, reinterpret_cast<BYTE*>(colorBufferMat.data), ColorImageFormat::ColorImageFormat_Bgra);
			if (SUCCEEDED(hResult)) {
				cv::resize(colorBufferMat, colorMat, cv::Size(), 0.5, 0.5);
			}
		}
		//SafeRelease( pColorFrame );

		// Depth Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);

		if (SUCCEEDED(hResult)) {
			hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, reinterpret_cast<UINT16**>(&depthBufferMat.data));

		}

		if (SUCCEEDED(hResult)) {
			hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, &pDepthBuffer);

			if (SUCCEEDED(hResult))
			{
				RGBQUAD* pRGBX = m_pDepthRGBX;

				// end pixel is start + width*height - 1
				const UINT16* pBufferEnd = pDepthBuffer + (512 * 424);
				int index = 0;

				while (pDepthBuffer < pBufferEnd)
				{
					USHORT depth = *pDepthBuffer;

					BYTE intensity = static_cast<BYTE>((depth >= 50) && (depth <= 5000) ? (depth % 256) : 0);

					pRGBX->rgbRed = intensity;
					pRGBX->rgbGreen = intensity;
					pRGBX->rgbBlue = intensity;

					depthData[index] = depth;

					++index;
					++pRGBX;
					++pDepthBuffer;
				}
			}
		}

		Mat DepthImage(424, 512, CV_8UC4, m_pDepthRGBX);
		//SafeRelease( pDepthFrame );

		// Mapping (Depth to Color)
		if (SUCCEEDED(hResult)) {
			std::vector<ColorSpacePoint> colorSpacePoints(depthWidth * depthHeight);
			hResult = pCoordinateMapper->MapDepthFrameToColorSpace(depthWidth * depthHeight, reinterpret_cast<UINT16*>(depthBufferMat.data), depthWidth * depthHeight, &colorSpacePoints[0]);
			if (SUCCEEDED(hResult)) {
				coordinateMapperMat = cv::Scalar(0, 0, 0, 0);
				for (int y = 0; y < depthHeight; y++) {
					for (int x = 0; x < depthWidth; x++) {
						unsigned int index = y * depthWidth + x;
						ColorSpacePoint point = colorSpacePoints[index];
						int colorX = static_cast<int>(std::floor(point.X + 0.5));
						int colorY = static_cast<int>(std::floor(point.Y + 0.5));
						unsigned short depth = depthBufferMat.at<unsigned short>(y, x);
						if ((colorX >= 0) && (colorX < colorWidth) && (colorY >= 0) && (colorY < colorHeight)/* && ( depth >= minDepth ) && ( depth <= maxDepth )*/) {
							coordinateMapperMat.at<cv::Vec4b>(y, x) = colorBufferMat.at<cv::Vec4b>(colorY, colorX);
						}
					}
				}
			}
		}

		if (SUCCEEDED(hResult))
		{

			//Particle Filter starts from here
			IplImage iplFrame = coordinateMapperMat; //transform Mat into an IplImage header (no data copy)
			frame = &iplFrame;

			if (image == 0)
			{
				// initialize variables and allocate buffers 
				image = cvCreateImage(cvGetSize(frame), 8, 4);//every pixel has 4 channels
				image->origin = frame->origin;

				result = cvCreateImage(cvGetSize(frame), 8, 4);
				result->origin = frame->origin;

				hsv = cvCreateImage(cvGetSize(frame), 8, 3);

				hue = cvCreateImage(cvGetSize(frame), 8, 1);

				sat = cvCreateImage(cvGetSize(frame), 8, 1);

				histimg_ref = cvCreateImage(cvGetSize(frame), 8, 3);
				histimg_ref->origin = frame->origin;
				cvZero(histimg_ref);

				histimg = cvCreateImage(cvGetSize(frame), 8, 3);
				histimg->origin = frame->origin;
				cvZero(histimg);

				bin_w = histimg_ref->width / BIN;
				bin_h = histimg_ref->height / BIN;


				data1.sample_t = reinterpret_cast<Region *> (malloc(sizeof(Region)* SAMPLE));
				data1.sample_t_1 = reinterpret_cast<Region *> (malloc(sizeof(Region)* SAMPLE));
				data1.sample_weight = reinterpret_cast<double *> (malloc(sizeof(double)* SAMPLE));
				data1.accum_weight = reinterpret_cast<double *> (malloc(sizeof(double)* SAMPLE));

			}

			cvCopy(frame, image);
			cvCopy(frame, result);
			cvCvtColor(image, hsv, CV_BGR2HSV);//image ~ hsv


			if (tracking)
			{
				//v_max = 0.0;

				cvSplit(hsv, hue, 0, 0, 0);//hsv->hue
				cvSplit(hsv, 0, 0, sat, 0);//hsv-saturation

				if (selecting)
				{

					// get the selected target area
					//ref_v_max = 0.0;
					area.width = abs(P_org.x - P_end.x);
					area.height = abs(P_org.y - P_end.y);
					area.x = MIN(P_org.x, P_end.x);
					area.y = MIN(P_org.y, P_end.y);

					cvZero(histimg_ref);

					// build reference histogram
					cvSetImageROI(hue, area);
					cvSetImageROI(sat, area);

					// zero reference histogram
					for (i = 0; i < BIN; i++)
						for (j = 0; j < BIN; j++)
							hist_ref[i][j] = 0.0;

					// calculate reference histogram
					for (i = 0; i < area.height; i++)
					{
						for (j = 0; j < area.width; j++)
						{
							im_hue = cvGet2D(hue, i, j);
							im_sat = cvGet2D(sat, i, j);
							k = int(im_hue.val[0] / STEP_HUE);
							h = int(im_sat.val[0] / STEP_SAT);
							hist_ref[k][h] = hist_ref[k][h] + 1.0;
						}
					}


					// rescale the value of each bin in the reference histogram 
					// and show it as an image
					for (i = 0; i < BIN; i++)
					{
						for (j = 0; j < BIN; j++)
						{
							hist_ref[i][j] = hist_ref[i][j] / (area.height*area.width);
						}
					}

					cvResetImageROI(hue);
					cvResetImageROI(sat);

					// initialize tracking and samples
					track_win = area;
					Initdata(track_win);
					track_win_last = track_win;

					// set up flag of tracking
					selecting = 0;

				}

				// sample propagation and weighting
				track_win = ImProcess(hue, sat, hist_ref, track_win_last);
				FrameNumber++;

				track_win_last = track_win;
				cvZero(histimg);

				// draw the one RED bounding box
				cvRectangle(image, cvPoint(track_win.x, track_win.y), cvPoint(track_win.x + track_win.width, track_win.y + track_win.height), CV_RGB(255, 0, 0), 2);
				printf("width = %d, height = %d\n", track_win.width, track_win.height);

				//save certain images
				if (FrameNumber % 10 == 0 && FrameNumber / 10 >= 1 && FrameNumber / 10 <= 8)
				{
					char filename[32];
					sprintf_s(filename, "./imageout%d.jpg", FrameNumber / 10);
					cvSaveImage(filename, image);
				}

				//save certain images
				if (FrameNumber % 10 == 0 && FrameNumber / 10 >= 1 && FrameNumber / 10 <= 8)
				{
					char filename[32];
					sprintf_s(filename, "./resultout%d.jpg", FrameNumber / 10);
					cvSaveImage(filename, result);
				}

				//draw a same bounding box in DepthImage
				rectangle(DepthImage, track_win, CV_RGB(255, 0, 0), 2);

				//******************************************************Geodesic Distance***************************************************************************************
					//Point propagation and weight
					if (PointTrack == 1)
					{

						if (PointSelect == 1)//only visit once
						{

							// initialize tracking and samples
							for (int i = 0; i < SAMPLE; i++)
							{
								point[i].x_1 = P_track.x;
								point[i].y_1 = P_track.y;
								point[i].z_1 = depthData[P_track.x + P_track.y * 512];
								point[i].x_1_prime = 0.0;
								point[i].y_1_prime = 0.0;
							}
							
							refeFlag = 1;

							p_win = P_track;

							//p_transtart is the start point of the surface mesh
							P_transtart.x = track_win.x;
							P_transtart.y = track_win.y;

							PointSelect = 0;

						}

						//construct the graph(mesh)
						ConstructMesh(depthData, adjlist, P_transtart,track_win.width,track_win.height);

						//calculate shortest path
						vector<int> vertexDist;
						vertexDist.resize(track_win.width*track_win.height);

						ShortestPath(P_extre, adjlist,  vertexDist);

						cvCircle(image, P_extre, 3, CV_RGB(0, 255, 0),1);

						//generate the reference distance for comparison
						if (refeFlag > 0)
						{
							cvCircle(image, p_win, 3, CV_RGB(0, 0, 255), 1);

							int track = abs(P_transtart.x - P_track.x) + track_win.width * abs(P_transtart.y - P_track.y);

							referDistance = vertexDist[track];

							refeFlag = 0;
						}
						
						//samples propagation
						PredictPoint(p_win);

						//get the geodesic distance for each sample and
						//find the sample whose distance is most similar to the reference distance
						float Dist, AbsDist, WinDist, minAbsDist = 10000;
						int number,sum=0,count=0;

						for (int i = 0; i < SAMPLE; i++)
						{
							int t = abs(P_transtart.x - point[i].x) + track_win.width * abs(P_transtart.y - point[i].y);
							if (adjlist[t].v == false) { count++; continue; }

							int refer = abs(point[i].x - P_transtart.x) + track_win.width * abs(point[i].y - P_transtart.y);
							Dist = vertexDist[refer];

							AbsDist = fabs(referDistance - Dist);

							//point[i].SampleWeight = AbsDist;
							//point[i].AccumWeight = sum;
							//sum = sum + AbsDist;

							if (AbsDist < minAbsDist) { minAbsDist = AbsDist; number = i; WinDist = Dist; }

						}

						//for (int i = 0; i < SAMPLE; i++)
						//{
						//	point[i].SampleWeight = point[i].SampleWeight / sum;
						//	point[i].AccumWeight = point[i].AccumWeight / sum;
						//}

						printf("referDist = %f, winDist = %f, discardPoints = %d\n", referDistance, WinDist,count);
						
						p_win_last = p_win;
						p_win.x = point[number].x;
						p_win.y = point[number].y;

						//samples re-location
						float deltaX = p_win.x - p_win_last.x;
						float deltaY = p_win.y - p_win_last.y;

						UpdatePoint(number, deltaX, deltaY);

						cvCircle(image, p_win, 5, CV_RGB(0, 0, 0));
					}



				//	//****************************************************************************************************************************************
			}

			// if still selecting a target, show the RED selected area
			else cvRectangle(image, P_org, P_end, CV_RGB(255, 0, 0), 1);

		}

		imshow("Depth", DepthImage);
		cvShowImage("Color Probabilistic Tracking - Samples", image);
		cvShowImage("Color Probabilistic Tracking - Result", result);

		SafeRelease(pColorFrame);
		SafeRelease(pDepthFrame);
		//SafeRelease(pBodyIndexFrame);

		cv::imshow("Color", colorMat);
		cv::imshow("Depth", DepthImage);
		cv::imshow("CoordinateMapper", coordinateMapperMat);

		//END OF THE TIME POINT
		t = ((double)getTickCount() - t) / getTickFrequency();
		t = 1 / t;

		//cout << "FPS:" << t << " FrameNumber:" << FrameNumber << endl;
		printf("FPS:%f Frame:%d \n\n", t, FrameNumber);


		if (cv::waitKey(30) == VK_ESCAPE) {
			break;
		}
	}

	SafeRelease(pColorSource);
	SafeRelease(pDepthSource);
	//SafeRelease(pBodyIndexSource);

	SafeRelease(pColorReader);
	SafeRelease(pDepthReader);
	//SafeRelease(pBodyIndexReader);

	SafeRelease(pColorDescription);
	SafeRelease(pDepthDescription);
	SafeRelease(pCoordinateMapper);
	if (pSensor) {
		pSensor->Close();
	}
	SafeRelease(pSensor);
	cv::destroyAllWindows();


	cvReleaseImage(&image);
	cvReleaseImage(&result);
	cvReleaseImage(&histimg_ref);
	cvReleaseImage(&histimg);
	cvReleaseImage(&hsv);
	cvReleaseImage(&hue);
	cvReleaseImage(&sat);
	cvDestroyWindow("Color Probabilistic Tracking - Samples");
	cvDestroyWindow("Color Probabilistic Tracking - Result");

	return 0;
}
Example #18
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource( &pDepthSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader( &pDepthReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pDepthSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424
	unsigned int bufferSize = width * height * sizeof( unsigned short );

	cv::Mat bufferMat( height, width, CV_16UC1 );
	cv::Mat depthMat( height, width, CV_8UC1 );
	cv::namedWindow( "Depth" );

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	// Create Reconstruction
	NUI_FUSION_RECONSTRUCTION_PARAMETERS reconstructionParameter;
	reconstructionParameter.voxelsPerMeter = 256;
	reconstructionParameter.voxelCountX = 512;
	reconstructionParameter.voxelCountY = 384;
	reconstructionParameter.voxelCountZ = 512;

	Matrix4 worldToCameraTransform;
	SetIdentityMatrix( worldToCameraTransform );
	INuiFusionReconstruction* pReconstruction;
	hResult = NuiFusionCreateReconstruction( &reconstructionParameter, NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, -1, &worldToCameraTransform, &pReconstruction );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateReconstruction()" << std::endl;
		return -1;
	}

	// Create Image Frame
	// Depth
	NUI_FUSION_IMAGE_FRAME* pDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// SmoothDepth
	NUI_FUSION_IMAGE_FRAME* pSmoothDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pSmoothDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// Point Cloud
	NUI_FUSION_IMAGE_FRAME* pPointCloudImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, width, height, nullptr, &pPointCloudImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( POINT_CLOUD )" << std::endl;
		return -1;
	}

	// Surface
	NUI_FUSION_IMAGE_FRAME* pSurfaceImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pSurfaceImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	// Normal
	NUI_FUSION_IMAGE_FRAME* pNormalImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pNormalImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	cv::namedWindow( "Surface" );
	cv::namedWindow( "Normal" );

	while( 1 ){
		// Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );

		if( SUCCEEDED( hResult ) ){
			hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
			if( SUCCEEDED( hResult ) ){
				bufferMat.convertTo( depthMat, CV_8U, -255.0f / 8000.0f, 255.0f );
				hResult = pReconstruction->DepthToDepthFloatFrame( reinterpret_cast<UINT16*>( bufferMat.data ), width * height * sizeof( UINT16 ), pDepthFloatImageFrame, NUI_FUSION_DEFAULT_MINIMUM_DEPTH/* 0.5[m] */, NUI_FUSION_DEFAULT_MAXIMUM_DEPTH/* 8.0[m] */, true );
				if( FAILED( hResult ) ){
					std::cerr << "Error :INuiFusionReconstruction::DepthToDepthFloatFrame()" << std::endl;
					return -1;
				}
			}
		}
		SafeRelease( pDepthFrame );

		// Smoothing Depth Image Frame
		hResult = pReconstruction->SmoothDepthFloatFrame( pDepthFloatImageFrame, pSmoothDepthFloatImageFrame, 1, 0.04f );
		if( FAILED( hResult ) ){
			std::cerr << "Error :INuiFusionReconstruction::SmoothDepthFloatFrame" << std::endl;
			return -1;
		}

		// Reconstruction Process
		pReconstruction->GetCurrentWorldToCameraTransform( &worldToCameraTransform );
		hResult = pReconstruction->ProcessFrame( pSmoothDepthFloatImageFrame, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT, nullptr, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			static int errorCount = 0;
			errorCount++;
			if( errorCount >= 100 ) {
				errorCount = 0;
				ResetReconstruction( pReconstruction, &worldToCameraTransform );
			}
		}

		// Calculate Point Cloud
		hResult = pReconstruction->CalculatePointCloud( pPointCloudImageFrame, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			std::cerr << "Error : CalculatePointCloud" << std::endl;
			return -1;
		}

		// Shading Point Cloud
		Matrix4 worldToBGRTransform = { 0.0f };
		worldToBGRTransform.M11 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountX;
		worldToBGRTransform.M22 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountY;
		worldToBGRTransform.M33 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountZ;
		worldToBGRTransform.M41 = 0.5f;
		worldToBGRTransform.M42 = 0.5f;
		worldToBGRTransform.M43 = 0.0f;
		worldToBGRTransform.M44 = 1.0f;

		hResult = NuiFusionShadePointCloud( pPointCloudImageFrame, &worldToCameraTransform, &worldToBGRTransform, pSurfaceImageFrame, pNormalImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiFusionShadePointCloud" << std::endl;
			return -1;
		}

		cv::Mat surfaceMat( height, width, CV_8UC4, pSurfaceImageFrame->pFrameBuffer->pBits );
		cv::Mat normalMat( height, width, CV_8UC4, pNormalImageFrame->pFrameBuffer->pBits );

		cv::imshow( "Depth", depthMat );
		cv::imshow( "Surface", surfaceMat );
		cv::imshow( "Normal", normalMat );

		int key = cv::waitKey( 30 );
		if( key == VK_ESCAPE ){
			break;
		}
		else if( key == 'r' ){
			ResetReconstruction( pReconstruction, &worldToCameraTransform );
		}
	}

	SafeRelease( pDepthSource );
	SafeRelease( pDepthReader );
	SafeRelease( pDescription );
	SafeRelease( pCoordinateMapper );
	SafeRelease( pReconstruction );
	NuiFusionReleaseImageFrame( pDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pSmoothDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pPointCloudImageFrame );
	NuiFusionReleaseImageFrame( pSurfaceImageFrame );
	NuiFusionReleaseImageFrame( pNormalImageFrame );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
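// ResetReconstruction is called above but not defined in this snippet; a
// plausible sketch based on the INuiFusionReconstruction API (hypothetical
// helper, not necessarily the author's exact version):
void ResetReconstruction( INuiFusionReconstruction* pReconstruction, Matrix4* pWorldToCameraTransform )
{
	SetIdentityMatrix( *pWorldToCameraTransform );
	HRESULT hResult = pReconstruction->ResetReconstruction( pWorldToCameraTransform, nullptr );
	if( FAILED( hResult ) ){
		std::cerr << "Error : INuiFusionReconstruction::ResetReconstruction()" << std::endl;
	}
}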
Example #19
int main(int argc, char** argv)
{
   int first_time = 0;
   Size screen_size(1440, 900); //the dst (display) image size
   Scalar text_color = Scalar(0, 255, 0);
   Scalar text_color2 = Scalar(0, 255, 255);
   Scalar text_color3 = Scalar(0, 0, 255);

   inhaler_coach coach;
   coach.control = 0;
   thread mThread(test_func, &coach);

   // 1a. Get Kinect Sensor
   cout << "Try to get default sensor" << endl;
   IKinectSensor* pSensor = nullptr;
   if (GetDefaultKinectSensor(&pSensor) != S_OK)
   {
      cerr << "Get Sensor failed" << endl;
      return -1;
   }

   // 1b. Open sensor
   cout << "Try to open sensor" << endl;
   if (pSensor->Open() != S_OK)
   {
      cerr << "Can't open sensor" << endl;
      return -1;
   }

   // 2. Color Related code
   IColorFrameReader* pColorFrameReader = nullptr;
   cv::Mat	mColorImg;
   UINT uBufferSize = 0;
   UINT uColorPointNum = 0;
   int iWidth = 0;
   int iHeight = 0;
   {
      // 2a. Get color frame source
      cout << "Try to get color source" << endl;
      IColorFrameSource* pFrameSource = nullptr;
      if (pSensor->get_ColorFrameSource(&pFrameSource) != S_OK)
      {
         cerr << "Can't get color frame source" << endl;
         return -1;
      }

      // 2b. Get frame description
      cout << "get color frame description" << endl;
      IFrameDescription* pFrameDescription = nullptr;
      if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
      {
         pFrameDescription->get_Width(&iWidth);
         pFrameDescription->get_Height(&iHeight);
      }
      pFrameDescription->Release();
      pFrameDescription = nullptr;

      // 2c. get frame reader
      cout << "Try to get color frame reader" << endl;
      if (pFrameSource->OpenReader(&pColorFrameReader) != S_OK)
      {
         cerr << "Can't get color frame reader" << endl;
         return -1;
      }

      // 2d. release Frame source
      cout << "Release frame source" << endl;
      pFrameSource->Release();
      pFrameSource = nullptr;

      // Prepare OpenCV data
      mColorImg = cv::Mat(iHeight, iWidth, CV_8UC4);
      uBufferSize = iHeight * iWidth * 4 * sizeof(BYTE);
      uColorPointNum = iHeight * iWidth;
   }
   

   // 3. Depth related code
   IDepthFrameReader* pDepthFrameReader = nullptr;
   UINT uDepthPointNum = 0;
   int iDepthWidth = 0, iDepthHeight = 0;
   cout << "Try to get depth source" << endl;
   {
      // Get frame source
      IDepthFrameSource* pFrameSource = nullptr;
      if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK)
      {
         cerr << "Can't get depth frame source" << endl;
         return -1;
      }

      // Get frame description
      cout << "get depth frame description" << endl;
      IFrameDescription* pFrameDescription = nullptr;
      if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
      {
         pFrameDescription->get_Width(&iDepthWidth);
         pFrameDescription->get_Height(&iDepthHeight);
         uDepthPointNum = iDepthWidth * iDepthHeight;
      }
      pFrameDescription->Release();
      pFrameDescription = nullptr;

      // get frame reader
      cout << "Try to get depth frame reader" << endl;
      if (pFrameSource->OpenReader(&pDepthFrameReader) != S_OK)
      {
         cerr << "Can't get depth frame reader" << endl;
         return -1;
      }

      // release Frame source
      cout << "Release frame source" << endl;
      pFrameSource->Release();
      pFrameSource = nullptr;
   }



   // 4. Body related code
   IBodyFrameReader* pBodyFrameReader = nullptr;
   IBody** aBodyData = nullptr;
   INT32 iBodyCount = 0;
   {
      // 3a. Get frame source
      cout << "Try to get body source" << endl;
      IBodyFrameSource* pFrameSource = nullptr;
      if (pSensor->get_BodyFrameSource(&pFrameSource) != S_OK)
      {
         cerr << "Can't get body frame source" << endl;
         return -1;
      }

      // 3b. Get the number of bodies
      if (pFrameSource->get_BodyCount(&iBodyCount) != S_OK)
      {
         cerr << "Can't get body count" << endl;
         return -1;
      }
      cout << " > Can trace " << iBodyCount << " bodies" << endl;
      aBodyData = new IBody*[iBodyCount];
      for (int i = 0; i < iBodyCount; ++i)
         aBodyData[i] = nullptr;

      // 3c. get frame reader
      cout << "Try to get body frame reader" << endl;
      if (pFrameSource->OpenReader(&pBodyFrameReader) != S_OK)
      {
         cerr << "Can't get body frame reader" << endl;
         return -1;
      }

      // 3d. release Frame source
      cout << "Release frame source" << endl;
      pFrameSource->Release();
      pFrameSource = nullptr;
   }
   


   // 4. Body Index related code
   IBodyIndexFrameReader* pBIFrameReader = nullptr;
   cout << "Try to get body index source" << endl;
   {
      // Get frame source
      IBodyIndexFrameSource* pFrameSource = nullptr;
      if (pSensor->get_BodyIndexFrameSource(&pFrameSource) != S_OK)
      {
         cerr << "Can't get body index frame source" << endl;
         return -1;
      }

      // get frame reader
      cout << "Try to get body index frame reader" << endl;
      if (pFrameSource->OpenReader(&pBIFrameReader) != S_OK)
      {
         cerr << "Can't get depth frame reader" << endl;
         return -1;
      }

      // release Frame source
      cout << "Release frame source" << endl;
      pFrameSource->Release();
      pFrameSource = nullptr;
   }



   // 5. background
   cv::Mat imgBG(iHeight, iWidth, CV_8UC3);
   imgBG.setTo(0);







   // 4. get CoordinateMapper
   ICoordinateMapper* pCoordinateMapper = nullptr;
   if (pSensor->get_CoordinateMapper(&pCoordinateMapper) != S_OK)
   {
      cout << "Can't get coordinate mapper" << endl;
      return -1;
   }

   // Enter main loop
   UINT16* pDepthPoints = new UINT16[uDepthPointNum];
   BYTE*	pBodyIndex = new BYTE[uDepthPointNum];
   DepthSpacePoint* pPointArray = new DepthSpacePoint[uColorPointNum];

   cv::namedWindow("Inhaler Coach");
   while (true)
   {
      // 4a. Get last frame
      IColorFrame* pColorFrame = nullptr;
      if (pColorFrameReader->AcquireLatestFrame(&pColorFrame) == S_OK)
      {
         pColorFrame->CopyConvertedFrameDataToArray(uBufferSize, mColorImg.data, ColorImageFormat_Bgra);
         pColorFrame->Release();
         pColorFrame = nullptr;
      }
      cv::Mat mImg = mColorImg.clone();
     
      // 8b. read depth frame
      IDepthFrame* pDepthFrame = nullptr;
      if (pDepthFrameReader->AcquireLatestFrame(&pDepthFrame) == S_OK)
      {
         pDepthFrame->CopyFrameDataToArray(uDepthPointNum, pDepthPoints);
         pDepthFrame->Release();
         pDepthFrame = nullptr;
      }


      // 8c. read body index frame
      IBodyIndexFrame* pBIFrame = nullptr;
      if (pBIFrameReader->AcquireLatestFrame(&pBIFrame) == S_OK)
      {
         pBIFrame->CopyFrameDataToArray(uDepthPointNum, pBodyIndex);
         pBIFrame->Release();
         pBIFrame = nullptr;
      }

#ifdef COACH_DEBUG
      cv::Mat imgTarget = imgBG.clone();
      // 9b. map color to depth
      if (pCoordinateMapper->MapColorFrameToDepthSpace(uDepthPointNum, pDepthPoints, uColorPointNum, pPointArray) == S_OK)
      {
         for (int y = 0; y < imgTarget.rows; ++y)
         {
            for (int x = 0; x < imgTarget.cols; ++x)
            {
               // ( x, y ) in color frame = rPoint in depth frame
               const DepthSpacePoint& rPoint = pPointArray[y * imgTarget.cols + x];

               // check if rPoint is in range
               if (rPoint.X >= 0 && rPoint.X < iDepthWidth && rPoint.Y >= 0 && rPoint.Y < iDepthHeight)
               {
                  // fill color from color frame if this pixel is user
                  int iIdx = (int)rPoint.X + iDepthWidth * (int)rPoint.Y;
                  if (pBodyIndex[iIdx] < 6)
                  {
                     cv::Vec4b& rPixel = mImg.at<cv::Vec4b>(y, x);
                     imgTarget.at<cv::Vec3b>(y, x) = cv::Vec3b(rPixel[0], rPixel[1], rPixel[2]);
                  }
               }
            }
         }
      }
#else
   cv::Mat imgTarget = mImg.clone();
#endif




      // 4b. Get body data
      IBodyFrame* pBodyFrame = nullptr;
      if (pBodyFrameReader->AcquireLatestFrame(&pBodyFrame) == S_OK)
      {

         // 4b. get Body data
         if (pBodyFrame->GetAndRefreshBodyData(iBodyCount, aBodyData) == S_OK)
         {
            // 4c. for each body
            for (int i = 0; i < iBodyCount; ++i)
            {
               IBody* pBody = aBodyData[i];

               // check if is tracked
               BOOLEAN bTracked = false;
               if ((pBody->get_IsTracked(&bTracked) == S_OK) && bTracked)
               {
                  // get joint position
                  Joint aJoints[JointType::JointType_Count];
                  if (pBody->GetJoints(JointType::JointType_Count, aJoints) == S_OK)
                  {
                     
                     if (coach.state == 0){
                        coach.state = 1;
                        if (first_time == 0){
                           first_time = 1;
                           PlaySound(TEXT("welcome.wav"), NULL, SND_FILENAME);
                        }
                     }
#ifdef COACH_DEBUG               
                     DrawLine(imgTarget, aJoints[JointType_SpineBase], aJoints[JointType_SpineMid], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_SpineMid], aJoints[JointType_SpineShoulder], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_SpineShoulder], aJoints[JointType_Neck], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_Neck], aJoints[JointType_Head], pCoordinateMapper);

                     DrawLine(imgTarget, aJoints[JointType_SpineShoulder], aJoints[JointType_ShoulderLeft], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_ShoulderLeft], aJoints[JointType_ElbowLeft], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_ElbowLeft], aJoints[JointType_WristLeft], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_WristLeft], aJoints[JointType_HandLeft], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_HandLeft], aJoints[JointType_HandTipLeft], pCoordinateMapper);
                     //DrawLine(imgTarget, aJoints[JointType_HandLeft], aJoints[JointType_ThumbLeft], pCoordinateMapper);

                     DrawLine(imgTarget, aJoints[JointType_SpineShoulder], aJoints[JointType_ShoulderRight], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_ShoulderRight], aJoints[JointType_ElbowRight], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_ElbowRight], aJoints[JointType_WristRight], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_WristRight], aJoints[JointType_HandRight], pCoordinateMapper);
                     DrawLine(imgTarget, aJoints[JointType_HandRight], aJoints[JointType_HandTipRight], pCoordinateMapper);
                     //DrawLine(imgTarget, aJoints[JointType_HandRight], aJoints[JointType_ThumbRight], pCoordinateMapper);
#endif
                     ColorSpacePoint q;
                     ColorSpacePoint head;
                     //ColorSpacePoint w;

                     pCoordinateMapper->MapCameraPointToColorSpace(aJoints[JointType_Head].Position, &head);
                     // check shaking
                     coach.shaking_detection(aJoints, pCoordinateMapper);
                     q = coach.position_checking(aJoints, pCoordinateMapper);
#ifdef COACH_DEBUG
                     circle(imgTarget, cv::Point(q.X, q.Y), 10, Scalar(0, 255, 255), 10, 8, 0);
                     //circle(imgTarget, cv::Point(q.X, q.Y), 10, Scalar(0, 255, 255), 10, 8, 0);
                     rectangle(imgTarget, Point(head.X - 50, head.Y - 40), Point(head.X + 50, head.Y + 90), Scalar(0, 255, 255), 1, 8, 0);
                     //circle(imgTarget, cv::Point(w.X, w.Y), 10, Scalar(255, 0, 255), 10, 8, 0);
#endif
                     coach.state_change_rule();
                  }
               }               
            }
         }
         else
         {
            cerr << "Can't read body data" << endl;
         }

         // 4e. release frame
         pBodyFrame->Release();
      }


      switch (coach.state){
         case 0: putText(imgTarget, "CMU Inhaler Coaching System", Point(120, 120), FONT_HERSHEY_DUPLEX, 2, text_color); break;
         case 1: putText(imgTarget, "Please shake the inhaler", Point(20, 120), FONT_HERSHEY_DUPLEX, 2, text_color2); break;
         case 2: putText(imgTarget, "Shaking detected", Point(20, 120), FONT_HERSHEY_DUPLEX, 2, text_color2); break;
         case 3: putText(imgTarget, "Please put the inhaler in front of your mouth", Point(20, 120), FONT_HERSHEY_DUPLEX, 2, text_color2); break;
         case 4: putText(imgTarget, "Position check OK", Point(20, 120), FONT_HERSHEY_DUPLEX, 2, text_color2); break;
         case 5: putText(imgTarget, "You forgot to shake the inhaler first!!!", Point(20, 120), FONT_HERSHEY_DUPLEX, 2, text_color3); break;
      }

      // show image
      Mat dst;
      resize(imgTarget, dst, screen_size);
      imshow("Coach", dst);
      // check keyboard input
      if (cv::waitKey(30) == VK_ESCAPE){
         break;
      }
   }
   mThread.join();
   
   // 3. delete body data array
   delete[] aBodyData;

   // 3. release frame reader
   cout << "Release body frame reader" << endl;
   pBodyFrameReader->Release();
   pBodyFrameReader = nullptr;

   // 2. release color frame reader
   cout << "Release color frame reader" << endl;
   pColorFrameReader->Release();
   pColorFrameReader = nullptr;

   // 1c. Close Sensor
   cout << "close sensor" << endl;
   pSensor->Close();

   // 1d. Release Sensor
   cout << "Release sensor" << endl;
   pSensor->Release();
   pSensor = nullptr;

   return 0;
}
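// Note: the skeleton overlay above calls a DrawLine helper that is not part of
// this listing. A minimal sketch, assuming it maps both joints into color space
// and skips untracked ones (the exact signature is inferred, not taken from the
// original source):
void DrawLine(cv::Mat& img, const Joint& j1, const Joint& j2, ICoordinateMapper* pMapper)
{
   // skip segments with an untracked endpoint
   if (j1.TrackingState == TrackingState_NotTracked || j2.TrackingState == TrackingState_NotTracked)
      return;

   // map camera-space joint positions (meters) to color-image pixels
   ColorSpacePoint p1, p2;
   pMapper->MapCameraPointToColorSpace(j1.Position, &p1);
   pMapper->MapCameraPointToColorSpace(j2.Position, &p2);
   cv::line(img, cv::Point((int)p1.X, (int)p1.Y), cv::Point((int)p2.X, (int)p2.Y), cv::Scalar(255, 0, 0), 3);
}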
Beispiel #20
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource( &pDepthSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader( &pDepthReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pDepthSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424
	unsigned int bufferSize = width * height * sizeof( unsigned short );
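	// bufferSize is recomputed by AccessUnderlyingBuffer below, which hands
	// back the frame's internal buffer instead of copying the data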

	cv::Mat bufferMat( height, width, CV_16UC1 );
	cv::Mat depthMat( height, width, CV_8UC1 );
	cv::namedWindow( "Depth" );

	while( 1 ){
		// Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
		if( SUCCEEDED( hResult ) ){
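			// AccessUnderlyingBuffer re-points bufferMat.data at the frame's
			// internal buffer (no copy); the data is valid only until the
			// frame is released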
			hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
			if( SUCCEEDED( hResult ) ){
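				// linear remap for display: 0 mm -> 255 (bright), 4500 mm -> 0 (dark)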
				bufferMat.convertTo( depthMat, CV_8U, -255.0f / 4500.0f, 255.0f );
			}
		}
	
		SafeRelease( pDepthFrame );

		cv::imshow( "Depth", depthMat );

		if( cv::waitKey( 30 ) == VK_ESCAPE ){
			break;
		}
	}

	SafeRelease( pDepthSource );
	SafeRelease( pDepthReader );
	SafeRelease( pDescription );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
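// The examples above and below call a SafeRelease helper that none of these
// listings define. A minimal sketch, assuming the template form used by the
// Kinect SDK samples:
template<class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
	// release the COM interface once and null the pointer so a second
	// SafeRelease on the same variable is harmless
	if (pInterfaceToRelease != nullptr)
	{
		pInterfaceToRelease->Release();
		pInterfaceToRelease = nullptr;
	}
}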
Beispiel #21
/// Initializes the default Kinect sensor
HRESULT CBodyBasics::InitializeDefaultSensor()
{
	// Used to check whether each operation succeeds
	HRESULT hr;

	// Look for the Kinect sensor
	hr = GetDefaultKinectSensor(&m_pKinectSensor);
	if (FAILED(hr)){
		return hr;
	}

	// A Kinect device was found
	if (m_pKinectSensor)
	{
		// Initialize the Kinect and get coordinate mapper and the body reader
		IBodyFrameSource* pBodyFrameSource = NULL;           // skeleton data
		IDepthFrameSource* pDepthFrameSource = NULL;         // depth data
		IBodyIndexFrameSource* pBodyIndexFrameSource = NULL; // binary body-index (background) image

		// Open the Kinect
		hr = m_pKinectSensor->Open();

		// coordinate mapper
		if (SUCCEEDED(hr))
		{
			hr = m_pKinectSensor->get_CoordinateMapper(&m_pCoordinateMapper);
		}

		// body frame
		if (SUCCEEDED(hr))
		{
			hr = m_pKinectSensor->get_BodyFrameSource(&pBodyFrameSource);
		}

		if (SUCCEEDED(hr))
		{
			hr = pBodyFrameSource->OpenReader(&m_pBodyFrameReader);
		}

		// depth frame
		if (SUCCEEDED(hr)){
			hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
		}

		if (SUCCEEDED(hr)){
			hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
		}

		// body index frame
		if (SUCCEEDED(hr)){
			hr = m_pKinectSensor->get_BodyIndexFrameSource(&pBodyIndexFrameSource);
		}

		if (SUCCEEDED(hr)){
			hr = pBodyIndexFrameSource->OpenReader(&m_pBodyIndexFrameReader);
		}

		SafeRelease(pBodyFrameSource);
		SafeRelease(pDepthFrameSource);
		SafeRelease(pBodyIndexFrameSource);
	}

	if (!m_pKinectSensor || FAILED(hr))
	{
		std::cout << "Kinect initialization failed!" << std::endl;
		return E_FAIL;
	}

	// skeletonImg: Mat used to draw the skeleton and the binary body-index image
	skeletonImg.create(cDepthHeight, cDepthWidth, CV_8UC3);
	skeletonImg.setTo(0);

	// depthImg: Mat used to draw the depth data
	depthImg.create(cDepthHeight, cDepthWidth, CV_8UC1);
	depthImg.setTo(0);

	return hr;
}
int main(int argc, char* argv[])
{
	int similarity = 0;

	string line;
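	// source_config.txt layout (one integer per line): camera height (mm),
	// fountain X, fountain Y, fountain width, blaster width, number of
	// blasters, then an X/Y pair for each blaster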
	ifstream myfile ("source_config.txt");
	if (myfile.is_open())
	{
		getline (myfile,line);
		hauteurCamera = stoi(line);
		getline (myfile,line);
		fountainXPosition = stoi(line);
		getline (myfile,line);
		fountainYPosition = stoi(line);
		getline (myfile,line);
		fountainWidth = stoi(line);

		getline (myfile,line);
		blasterWidth = stoi(line);
		getline (myfile,line);
		int numberOfBlaster = stoi(line);

		for(int i = 0;i<numberOfBlaster;i++){
			getline (myfile,line);
			blasterXPosition.push_back(stoi(line));
			getline (myfile,line);
			blasterYPosition.push_back(stoi(line));
		}

		myfile.close();
	}

	else
	{
		cout << "Unable to open file"; 
		exit(-1);
	}

	IKinectSensor* m_pKinectSensor;
	IDepthFrameReader* pDepthReader;
	IDepthFrameSource* pDepthFrameSource = NULL; // Depth image

	IColorFrameReader* pColorReader;
	IColorFrameSource* pColorFrameSource = NULL;


	HRESULT hr;

	hr = GetDefaultKinectSensor(&m_pKinectSensor);
	if (FAILED(hr)){
		cout << "ColorTrackerv2 Error : GetDefaultKinectSensor failed." << endl;
		return -1;
	}

	hr = m_pKinectSensor->Open();
	if (FAILED(hr)){
		cout << "ColorTrackerv2 Error : Open failed." << endl;
		return -1;
	}

	hr = m_pKinectSensor->get_DepthFrameSource( &pDepthFrameSource );
	if (FAILED(hr)){
		cout << "ColorTrackerv2 Error : get_DepthFrameSource failed." << endl;
		return -1;
	}
	hr = pDepthFrameSource->OpenReader( &pDepthReader );
	if (FAILED(hr)){
		cout << "ColorTrackerv2 Error : OpenReader failed." << endl;
		return -1;
	}


	IFrameDescription* pDepthDescription;
	hr = pDepthFrameSource->get_FrameDescription( &pDepthDescription );
	if( FAILED( hr ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}
	w = 0; h = 0;
	pDepthDescription->get_Width( &w ); // 512
	pDepthDescription->get_Height( &h ); // 424
	//unsigned int irBufferSize = w * h * sizeof( unsigned short );


	hr = m_pKinectSensor->get_ColorFrameSource( &pColorFrameSource );
	if (FAILED(hr)){
		cout << "ColorTrackerv2 Error : get_ColorFrameSource failed." << endl;
		return -1;
	}
	hr = pColorFrameSource->OpenReader( &pColorReader );
	if (FAILED(hr)){
		cout << "ColorTrackerv2 Error : OpenReader failed." << endl;
		return -1;
	}

	// Description
	IFrameDescription* pColorDescription;
	hr = pColorFrameSource->get_FrameDescription( &pColorDescription );
	if( FAILED( hr ) ){
		std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}
	int colorWidth = 0;
	int colorHeight = 0;
	pColorDescription->get_Width( &colorWidth ); // 1920
	pColorDescription->get_Height( &colorHeight ); // 1080
	unsigned int colorBufferSize = colorWidth * colorHeight * 4 * sizeof( unsigned char );
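	// 4 bytes per pixel (BGRA); CopyConvertedFrameDataToArray below expects
	// exactly this many bytes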

	Mat colorBufferMat = Mat::zeros( cvSize(colorWidth,colorHeight), CV_8UC4   );
	//colorMat = Mat::zeros( cvSize(colorWidth/2,colorHeight/2), CV_8UC4   );



	ICoordinateMapper* pCoordinateMapper;
	hr = m_pKinectSensor->get_CoordinateMapper( &pCoordinateMapper );
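	// the coordinate mapper converts between depth pixels and camera-space
	// meters; it is used below when saving the 3D blaster positions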



	//  open a opencv window
	cv::namedWindow("source_config", CV_WINDOW_AUTOSIZE );
	setMouseCallback("source_config", MouseCallBackFunc, NULL);

	Mat frame(h,w, CV_8UC3, Scalar(255,255,255));
	Mat display;
	//Mat img;

	char k = 0;
	while(k!=27){

		HRESULT hResult = S_OK;

		if(displayColor){
			IColorFrame* pColorFrame = nullptr;
			hResult = pColorReader->AcquireLatestFrame( &pColorFrame );
			while(!SUCCEEDED(hResult)){
				Sleep(50);
				hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
			}

			if( SUCCEEDED( hResult ) ){
				hResult = pColorFrame->CopyConvertedFrameDataToArray( colorBufferSize, reinterpret_cast<BYTE*>( colorBufferMat.data ), ColorImageFormat::ColorImageFormat_Bgra );
				if( !SUCCEEDED( hResult ) ){
					return -1;
				}

				resize(colorBufferMat,display,Size(displaySize*w,displaySize*h));

				flip(display,display,1);

				cv::line(display,Point(displaySize*w/2,0),Point(displaySize*w/2,displaySize*h),Scalar(0,0,255),2);
				cv::line(display,Point(0,displaySize*h/2),Point(displaySize*w,displaySize*h/2),Scalar(0,0,255),2);


				if (pColorFrame )
				{
					pColorFrame ->Release();
					pColorFrame  = NULL;
				}



			}
			else 
				return -1;



		}
		else
		{

			IDepthFrame* pDepthFrame = nullptr;
			hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
			while(!SUCCEEDED(hResult)){
				Sleep(10);
				hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
			}

			if( SUCCEEDED( hResult ) ){
				unsigned int bufferSize = 0;
				unsigned short* buffer = nullptr;
				hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, &buffer );

				if( SUCCEEDED( hResult ) ){
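					// the depth index is mirrored horizontally (w - x - 1) so the
					// view matches the flipped color display; pixels closer than
					// hauteurCamera get a false-color ramp, the rest stay white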
					for( int y = 0; y < h; y++ ){
						for( int x = 0; x < w; x++ ){
							Vec3b intensity = frame.at<Vec3b>(y, x);
							if(buffer[ y * w + (w - x - 1) ]  < hauteurCamera){
								int d = buffer[ y * w + (w - x - 1) ];
								intensity.val[0] = 2.55*(d % 100);
								intensity.val[1] = 1.22*(d % 200);
								intensity.val[2] = 256.0*d/hauteurCamera;
							}
							else
							{
								intensity.val[0] = 255;
								intensity.val[1] = 255;
								intensity.val[2] = 255;
							}
							/*intensity.val[0] = buffer[ y * w + x ] >> 8;
							intensity.val[1] = buffer[ y * w + x ] >> 8;
							intensity.val[2] = buffer[ y * w + x ] >> 8;*/
							frame.at<Vec3b>(y, x) = intensity;
						}
					}

					// change the rectangle color depending on whether the corner heights are similar or not (less than 4 cm apart)

					float d1 = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))];
					float d2 = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))];
					float d3 = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))];
					float d4 = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))];
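					// similarity score for the four corner depths: corners outside
					// [0, 3500] mm invalidate the score; otherwise it is 255 when all
					// pairwise differences are below mn = 100 mm, fading linearly to 0
					// as the largest difference grows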

					if((d1 < 0)||(d1>3500)||(d2 < 0)||(d2>3500)||(d3 < 0)||(d3>3500)||(d4 < 0)||(d4>3500)){
						similarity = 0;
					}
					else
					{
						int mn = 100;
						if((abs(d1-d2) < mn)
							&&(abs(d1-d3) < mn)
							&&(abs(d1-d4) < mn)
							&&(abs(d2-d3) < mn)
							&&(abs(d2-d4) < mn)
							&&(abs(d3-d4) < mn))
							similarity = 255;
						else{
							int md = abs(d1-d2);
							md = MAX(md,abs(d1-d3));
							md = MAX(md,abs(d1-d4));
							md = MAX(md,abs(d2-d3));
							md = MAX(md,abs(d2-d4));
							md = MAX(md,abs(d3-d4));
							if(md-mn>128)
								similarity = 0;
							else
								similarity = 128 - (md - mn);
						}
					}


					if(k=='s'){

						// get the camera height

						// From the depth frame, get the 3D positions of the 1.20 m jets
						// and save them to another file, to be loaded by an Observer


						CameraSpacePoint cameraPoint = { 0 };
						DepthSpacePoint depthPoint = { 0 };
						UINT16 depth;

						// Compute the camera height as the average of the four fountain corners
						float sumH = 0; // renamed from 'h', which shadowed the global image height
						int cptH = 0;
						float d;

						d = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))];
						if((d>500)&&(d<3500)){sumH+=d; cptH++;}
						d = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))];
						if((d>500)&&(d<3500)){sumH+=d; cptH++;}
						d = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))];
						if((d>500)&&(d<3500)){sumH+=d; cptH++;}
						d = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))];
						if((d>500)&&(d<3500)){sumH+=d; cptH++;}
						if(cptH>0){
							//hauteurCamera = sumH/cptH;
							cout << "H = " << hauteurCamera << endl;
						}

						// Compute real size of blaster
						/*depthPoint.X = static_cast<float>(blasterXPosition[0] - blasterWidth); 
						depthPoint.Y = static_cast<float>(blasterYPosition[0] - blasterWidth); 
						depth = hauteurCamera - 1000.0f; // TODO change
						pCoordinateMapper->MapDepthPointToCameraSpace(depthPoint,depth,&cameraPoint);
						float corner1X = static_cast<float>(cameraPoint.X);
						float corner1Y = static_cast<float>(cameraPoint.Y);


						depthPoint.X = static_cast<float>(blasterXPosition[0] + blasterWidth); 
						depthPoint.Y = static_cast<float>(blasterYPosition[0] + blasterWidth); 
						pCoordinateMapper->MapDepthPointToCameraSpace(depthPoint,depth,&cameraPoint);
						float corner2X = static_cast<float>(cameraPoint.X);
						float corner2Y = static_cast<float>(cameraPoint.Y);*/

						//float realBlasterWidth = 1000.0*(abs(corner2X-corner1X)+abs(corner2Y-corner1Y))/2.0;


						ofstream myfile;
						myfile.open ("source_config.txt");
						myfile << hauteurCamera << "\n";
						myfile << fountainXPosition << "\n";
						myfile << fountainYPosition << "\n";
						myfile << fountainWidth << "\n";
						myfile << blasterWidth << "\n";
						myfile << blasterXPosition.size() << "\n";
						for(int i = 0; i < (int)blasterXPosition.size(); i++){
							myfile << blasterXPosition[i] << "\n";
							myfile << blasterYPosition[i] << "\n";
						}
						myfile.close();

						//  save real positions to file

						myfile.open ("source3D.txt");
						myfile << hauteurCamera << "\n";
						myfile << blasterWidth << "\n";
						myfile << blasterXPosition.size() << "\n";
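						// convert each blaster's depth-pixel position (at the measured
						// camera height) to camera-space meters, then store millimeters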
						for(int i = 0; i < (int)blasterXPosition.size(); i++){
							depthPoint.X = static_cast<float>(blasterXPosition[i]); 
							depthPoint.Y = static_cast<float>(blasterYPosition[i]); 
							depth = hauteurCamera;
							pCoordinateMapper->MapDepthPointToCameraSpace(depthPoint,depth,&cameraPoint);
							cout << depthPoint.X << " - " << depthPoint.Y << endl;
							cout << static_cast<float>(cameraPoint.X) << " - " << static_cast<float>(cameraPoint.Y) << endl;
							pCoordinateMapper->MapCameraPointToDepthSpace(cameraPoint, &depthPoint);
							cout << depthPoint.X << " - " << depthPoint.Y << endl;
							cout << static_cast<float>(cameraPoint.X) << " - " << static_cast<float>(cameraPoint.Y) << endl;
							myfile << 1000.0*static_cast<float>(cameraPoint.X) << "\n";
							myfile << 1000.0*static_cast<float>(cameraPoint.Y) << "\n";
						}
						myfile.close();


					}


				}
				else{
					return -1;
				}

			}
			else 
				return -1;

			if( pDepthFrame != NULL ){
				pDepthFrame->Release();
				pDepthFrame = NULL;
			}

			rectangle(frame,
				Rect(fountainXPosition-fountainWidth/2.0, 
				fountainYPosition-fountainWidth/2.0, 
				fountainWidth	, 
				fountainWidth),
				Scalar(0,similarity,255-similarity),
				3);
			for(int i = -2; i <= 2; i++)
			{
				rectangle(frame,
					Rect(fountainXPosition - (i*blasterWidth) - (blasterWidth/2.0), 
					fountainYPosition-fountainWidth/2.0, 
					blasterWidth, 
					fountainWidth),
					Scalar(0,255,0),
					1);
				rectangle(frame,
					Rect(fountainXPosition-fountainWidth/2.0,
					fountainYPosition - (i*blasterWidth) - (blasterWidth/2.0),
					fountainWidth, 
					blasterWidth),
					Scalar(0,255,0),
					1);
			}

			char textbuffer[33];
			for(int i = 0; i < (int)blasterXPosition.size(); i++){
				sprintf(textbuffer,"%i",i+1);
				putText(frame,textbuffer, Point2f(blasterXPosition[i]-blasterWidth/2.0,blasterYPosition[i]), FONT_HERSHEY_PLAIN, 1,  Scalar(0,0,255,255), 2);
				//rectangle(frame,Rect(blasterXPosition[i]-blasterWidth/2.0,blasterYPosition[i]-blasterWidth/2.0,blasterWidth,blasterWidth),Scalar(255,0,0));
				circle(frame,Point(blasterXPosition[i],blasterYPosition[i]),blasterWidth/2.0,Scalar(255,0,0));
			}
			resize(frame,display,Size(displaySize*w,displaySize*h));
		}

		cv::imshow("source_config", display);


		k=cv::waitKey(1);	

		if(k == 32) // Space
			displayColor = ! displayColor;
		
		if(k=='a')
			alignBuseFromLayout();
		if(k=='r')
			resetFountainPosition();

	}


	if (pDepthReader)
	{
		pDepthReader->Release();
		pDepthReader = NULL;
	}

	if (pColorReader)
	{
		pColorReader->Release();
		pColorReader = NULL;
	}

	if (pDepthDescription)
	{
		pDepthDescription->Release();
		pDepthDescription = NULL;
	}

	if (pColorDescription)
	{
		pColorDescription->Release();
		pColorDescription = NULL;
	}

	if (pCoordinateMapper)
	{
		pCoordinateMapper->Release();
		pCoordinateMapper = NULL;
	}

	if (m_pKinectSensor)
	{
		m_pKinectSensor->Close();
		m_pKinectSensor->Release();
		m_pKinectSensor = NULL;
	}


	//system("pause");

	return 0;
}