// Constructor
KinectV2::KinectV2()
{
    // Get the sensor
    if (sensor == NULL && GetDefaultKinectSensor(&sensor) == S_OK)
    {
        HRESULT hr;

        // Start using the sensor
        hr = sensor->Open();
        assert(hr == S_OK);

        // Set up reading of the depth data
        IDepthFrameSource *depthSource;
        hr = sensor->get_DepthFrameSource(&depthSource);
        assert(hr == S_OK);
        hr = depthSource->OpenReader(&depthReader);
        assert(hr == S_OK);
        IFrameDescription *depthDescription;
        hr = depthSource->get_FrameDescription(&depthDescription);
        assert(hr == S_OK);
        depthSource->Release();

        // Get the size of the depth data
        depthDescription->get_Width(&depthWidth);
        depthDescription->get_Height(&depthHeight);
        depthDescription->Release();

        // Set up reading of the color data
        IColorFrameSource *colorSource;
        hr = sensor->get_ColorFrameSource(&colorSource);
        assert(hr == S_OK);
        hr = colorSource->OpenReader(&colorReader);
        assert(hr == S_OK);
        IFrameDescription *colorDescription;
        hr = colorSource->get_FrameDescription(&colorDescription);
        assert(hr == S_OK);
        colorSource->Release();

        // Get the size of the color data
        colorDescription->get_Width(&colorWidth);
        colorDescription->get_Height(&colorHeight);
        colorDescription->Release();

        // Coordinate mapping
        hr = sensor->get_CoordinateMapper(&coordinateMapper);
        assert(hr == S_OK);

        // Compute depthCount and colorCount and create the texture and buffer objects
        makeTexture();

        // Allocate temporary memory used to derive camera coordinates from the depth data
        position = new GLfloat[depthCount][3];

        // Allocate temporary memory used to convert the color data
        color = new GLubyte[colorCount * 4];
    }
}
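The constructor above relies on class members and a makeTexture() helper that are not shown in this snippet. A hedged sketch of the member layout it appears to assume follows; the names are taken from the code above, but the real header (and whether sensor is a shared/static member) may differ.

// Hypothetical member layout assumed by the constructor above.
class KinectV2
{
    IKinectSensor *sensor = nullptr;             // Kinect sensor handle (may be shared in the real class)
    IDepthFrameReader *depthReader = nullptr;    // depth stream reader
    IColorFrameReader *colorReader = nullptr;    // color stream reader
    ICoordinateMapper *coordinateMapper = nullptr;
    int depthWidth = 0, depthHeight = 0;         // depth frame size
    int colorWidth = 0, colorHeight = 0;         // color frame size
    int depthCount = 0, colorCount = 0;          // pixel counts, computed in makeTexture()
    GLfloat (*position)[3] = nullptr;            // one camera-space point per depth pixel
    GLubyte *color = nullptr;                    // RGBA color working buffer
    void makeTexture();                          // creates the texture and buffer objects
public:
    KinectV2();
};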
void KinectCapture::GetDepthFrame(IMultiSourceFrame* pMultiFrame)
{
    IDepthFrameReference* pDepthFrameReference = NULL;
    IDepthFrame* pDepthFrame = NULL;
    pMultiFrame->get_DepthFrameReference(&pDepthFrameReference);
    HRESULT hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);

    if (SUCCEEDED(hr))
    {
        if (pDepth == NULL)
        {
            IFrameDescription* pFrameDescription = NULL;
            hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
            pFrameDescription->get_Width(&nDepthFrameWidth);
            pFrameDescription->get_Height(&nDepthFrameHeight);
            pDepth = new UINT16[nDepthFrameHeight * nDepthFrameWidth];
            SafeRelease(pFrameDescription);
        }

        UINT nBufferSize = nDepthFrameHeight * nDepthFrameWidth;
        hr = pDepthFrame->CopyFrameDataToArray(nBufferSize, pDepth);
    }

    SafeRelease(pDepthFrame);
    SafeRelease(pDepthFrameReference);
}
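SafeRelease() is called throughout these snippets but never defined in any of them. The Kinect for Windows SDK samples typically define it as a small template like the following; it is shown here as an assumed helper so the snippets read as self-contained.

// Release a COM interface pointer and null it so repeated calls are harmless.
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
    if (pInterfaceToRelease != NULL)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = NULL;
    }
}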
//----------
void Color::update(IColorFrame * frame) {
    this->isFrameNewFlag = true;
    IFrameDescription * frameDescription = NULL;
    try {
        //allocate pixels and texture if we need to
        if (FAILED(frame->get_FrameDescription(&frameDescription))) {
            throw Exception("Failed to get frame description");
        }
        int width, height;
        if (FAILED(frameDescription->get_Width(&width)) || FAILED(frameDescription->get_Height(&height))) {
            throw Exception("Failed to get width and height of frame");
        }
        if (width != this->pixels.getWidth() || height != this->texture.getHeight()) {
            this->pixels.allocate(width, height, OF_IMAGE_COLOR_ALPHA);
            this->texture.allocate(this->pixels);
        }

        //update local rgba image
        if (FAILED(frame->CopyConvertedFrameDataToArray(this->pixels.size(), this->pixels.getPixels(), ColorImageFormat_Rgba))) {
            throw Exception("Couldn't pull pixel buffer");
        }
        if (this->useTexture) {
            this->texture.loadData(this->pixels);
        }

        //update yuv
        if (this->yuvPixelsEnabled) {
            if (width != this->yuvPixels.getWidth() || height != this->yuvPixels.getHeight()) {
                this->yuvPixels.allocate(width, height, OF_PIXELS_YUY2);
            }
            if (FAILED(frame->CopyRawFrameDataToArray(this->yuvPixels.size(), this->yuvPixels.getPixels()))) {
                throw Exception("Couldn't pull raw YUV pixel buffer");
            }
        }

        //update field of view
        if (FAILED(frameDescription->get_HorizontalFieldOfView(&this->horizontalFieldOfView))) {
            throw Exception("Failed to get horizontal field of view");
        }
        if (FAILED(frameDescription->get_VerticalFieldOfView(&this->verticalFieldOfView))) {
            throw Exception("Failed to get vertical field of view");
        }
        if (FAILED(frameDescription->get_DiagonalFieldOfView(&this->diagonalFieldOfView))) {
            throw Exception("Failed to get diagonal field of view");
        }

        IColorCameraSettings * cameraSettings;
        if (FAILED(frame->get_ColorCameraSettings(&cameraSettings))) {
            throw Exception("Failed to get color camera settings");
        }
        cameraSettings->get_ExposureTime(&this->exposure);
        cameraSettings->get_FrameInterval(&this->frameInterval);
        cameraSettings->get_Gain(&this->gain);
        cameraSettings->get_Gamma(&this->gamma);
    }
    catch (std::exception & e) {
        OFXKINECTFORWINDOWS2_ERROR << e.what();
    }
    SafeRelease(frameDescription);
}
HRESULT KinectHDFaceGrabber::initColorFrameReader()
{
    IColorFrameSource* pColorFrameSource = nullptr;
    HRESULT hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);

    if (SUCCEEDED(hr)) {
        hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
    }

    IFrameDescription* pFrameDescription = nullptr;
    if (SUCCEEDED(hr)) {
        hr = pColorFrameSource->get_FrameDescription(&pFrameDescription);
    }
    if (SUCCEEDED(hr)) {
        hr = pFrameDescription->get_Width(&m_colorWidth);
    }
    if (SUCCEEDED(hr)) {
        hr = pFrameDescription->get_Height(&m_colorHeight);
    }
    if (SUCCEEDED(hr)) {
        m_colorBuffer.resize(m_colorHeight * m_colorWidth);
    }

    SafeRelease(pFrameDescription);
    SafeRelease(pColorFrameSource);

    return hr;
}
void Microsoft2Grabber::BodyIndexFrameArrived(IBodyIndexFrameReference* pBodyIndexFrameReference)
{
    IBodyIndexFrame* pBodyIndexFrame = NULL;
    HRESULT hr = pBodyIndexFrameReference->AcquireFrame(&pBodyIndexFrame);
    if (FAILED(hr))
        return;
    //cout << "got a body index frame" << endl;

    IFrameDescription* pBodyIndexFrameDescription = NULL;
    int nBodyIndexWidth = 0;
    int nBodyIndexHeight = 0;
    UINT nBodyIndexBufferSize = 0;
    BYTE *pBodyIndexBuffer = NULL;

    // get body index frame data
    if (SUCCEEDED(hr)) {
        hr = pBodyIndexFrame->get_FrameDescription(&pBodyIndexFrameDescription);
    }
    if (SUCCEEDED(hr)) {
        hr = pBodyIndexFrameDescription->get_Width(&nBodyIndexWidth);
    }
    if (SUCCEEDED(hr)) {
        hr = pBodyIndexFrameDescription->get_Height(&nBodyIndexHeight);
    }
    if (SUCCEEDED(hr)) {
        hr = pBodyIndexFrame->AccessUnderlyingBuffer(&nBodyIndexBufferSize, &pBodyIndexBuffer);
    }

    SafeRelease(pBodyIndexFrameDescription);
    SafeRelease(pBodyIndexFrame);
}
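The buffer obtained above holds one BYTE per depth pixel: values 0 to BODY_COUNT-1 identify the tracked body covering that pixel and 0xff marks background. A minimal, hypothetical helper (not part of Microsoft2Grabber) that turns it into a binary mask could look like this.

// Turn a Kinect v2 body-index buffer into an 8-bit mask (255 = any tracked body, 0 = background).
void BodyIndexToMask(const BYTE* pBodyIndexBuffer, int width, int height, BYTE* pMaskOut)
{
    const int count = width * height;
    for (int i = 0; i < count; ++i)
    {
        pMaskOut[i] = (pBodyIndexBuffer[i] != 0xff) ? 255 : 0;
    }
}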
KinectColor::KinectColor(IKinectSensor *m_pKinectSensor)
{
    width = new int();
    height = new int();
    bufferSize = new unsigned int();

    HRESULT hr;
    IColorFrameSource* pColorFrameSource = NULL;
    hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
    if (SUCCEEDED(hr))
    {
        hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
    }

    IFrameDescription* pDescription = NULL;
    hr = pColorFrameSource->get_FrameDescription(&pDescription);
    if (SUCCEEDED(hr))
    {
        pDescription->get_Width(width);
        pDescription->get_Height(height);
        *bufferSize = *width * *height * 4 * sizeof(unsigned char);

        bufferMat = new cv::Mat(*height, *width, CV_8UC4);
        colorMat = new cv::Mat(HEIGHT, WIDTH, CV_8UC4);

        memset(&baseImage, 0, sizeof(BASEIMAGE));
        CreateXRGB8ColorData(&baseImage.ColorData);
        baseImage.MipMapCount = 0;
        handle = -1;
    }
    SafeRelease(pDescription);
    SafeRelease(pColorFrameSource);
}
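A hedged sketch of how the reader created above might be polled each frame. The free-function form and its name are illustrative; only the Kinect API calls and the BGRA conversion are taken from the SDK, and the class's cv::Mat* members are passed here by reference.

// Copy the latest color frame into bufferMat (BGRA) and downscale into colorMat.
void UpdateColorMats(IColorFrameReader* pColorFrameReader, cv::Mat& bufferMat,
                     cv::Mat& colorMat, unsigned int bufferSize)
{
    IColorFrame* pColorFrame = NULL;
    if (SUCCEEDED(pColorFrameReader->AcquireLatestFrame(&pColorFrame)))
    {
        // The native color format is YUY2; let the runtime convert to BGRA for OpenCV.
        if (SUCCEEDED(pColorFrame->CopyConvertedFrameDataToArray(
                bufferSize, bufferMat.data, ColorImageFormat_Bgra)))
        {
            cv::resize(bufferMat, colorMat, colorMat.size());
        }
    }
    SafeRelease(pColorFrame);
}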
HRESULT KinectHDFaceGrabber::initDepthFrameReader()
{
    IDepthFrameSource* depthFrameSource = nullptr;
    HRESULT hr = m_pKinectSensor->get_DepthFrameSource(&depthFrameSource);

    IFrameDescription* frameDescription = nullptr;
    if (SUCCEEDED(hr)) {
        hr = depthFrameSource->get_FrameDescription(&frameDescription);
    }
    if (SUCCEEDED(hr)) {
        hr = frameDescription->get_Width(&m_depthWidth);
    }
    if (SUCCEEDED(hr)) {
        hr = frameDescription->get_Height(&m_depthHeight);
    }
    if (SUCCEEDED(hr)) {
        m_depthBuffer.resize(m_depthHeight * m_depthWidth);
    }
    SafeRelease(frameDescription);

    if (SUCCEEDED(hr)) {
        hr = depthFrameSource->OpenReader(&m_pDepthFrameReader);
    }
    SafeRelease(depthFrameSource);

    return hr;
}
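For completeness, a hypothetical polling step for the reader initialised above. The helper name and the std::vector-based buffer are assumptions; AcquireLatestFrame simply fails while no new frame is ready, so callers can retry on the next tick.

// Grab the most recent depth frame, if any, into a preallocated UINT16 buffer.
HRESULT PollLatestDepthFrame(IDepthFrameReader* pDepthFrameReader, std::vector<UINT16>& depthBuffer)
{
    IDepthFrame* pDepthFrame = nullptr;
    HRESULT hr = pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrame->CopyFrameDataToArray(static_cast<UINT>(depthBuffer.size()), depthBuffer.data());
    }
    SafeRelease(pDepthFrame);
    return hr;
}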
void KinectCapture::GetColorFrame(IMultiSourceFrame* pMultiFrame)
{
    IColorFrameReference* pColorFrameReference = NULL;
    IColorFrame* pColorFrame = NULL;
    pMultiFrame->get_ColorFrameReference(&pColorFrameReference);
    HRESULT hr = pColorFrameReference->AcquireFrame(&pColorFrame);

    if (SUCCEEDED(hr))
    {
        if (pColorRGBX == NULL)
        {
            IFrameDescription* pFrameDescription = NULL;
            hr = pColorFrame->get_FrameDescription(&pFrameDescription);
            hr = pFrameDescription->get_Width(&nColorFrameWidth);
            hr = pFrameDescription->get_Height(&nColorFrameHeight);
            pColorRGBX = new RGB[nColorFrameWidth * nColorFrameHeight];
            SafeRelease(pFrameDescription);
        }

        UINT nBufferSize = nColorFrameWidth * nColorFrameHeight * sizeof(RGB);
        hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pColorRGBX), ColorImageFormat_Bgra);
    }

    SafeRelease(pColorFrame);
    SafeRelease(pColorFrameReference);
}
cv::Mat capKinect::update(cv::Mat& depth_show)
{
    if (!m_pDepthReader) return cv::Mat();

    IDepthFrame* pDepthFrame = NULL;
    HRESULT hr = m_pDepthReader->AcquireLatestFrame(&pDepthFrame);
    cv::Mat re;
    if (SUCCEEDED(hr))
    {
        IFrameDescription* pFrameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        USHORT nDepthMinReliableDistance = 0;
        USHORT nDepthMaxDistance = 0;
        UINT nBufferSize = 0;
        UINT16 *pBuffer = NULL;

        if (SUCCEEDED(hr)) {
            hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr)) {
            hr = pFrameDescription->get_Width(&nWidth);
        }
        if (SUCCEEDED(hr)) {
            hr = pFrameDescription->get_Height(&nHeight);
        }
        if (SUCCEEDED(hr)) {
            hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
        }
        if (SUCCEEDED(hr)) {
            // In order to see the full range of depth (including the less reliable far field depth)
            // we set nDepthMaxDistance to the extreme potential depth threshold.
            nDepthMaxDistance = USHRT_MAX;
            // Note: If you wish to filter by reliable depth distance instead, uncomment the following line.
            // hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
        }
        if (SUCCEEDED(hr)) {
            hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
        }
        if (SUCCEEDED(hr)) {
            re = capture(pBuffer, nWidth, nHeight, depth_show, nDepthMinReliableDistance, nDepthMaxDistance);
        }
        if (pFrameDescription) SafeRelease(pFrameDescription);
    }
    if (pDepthFrame) SafeRelease(pDepthFrame);
    return re;
}
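The capture() member referenced above is not shown in this snippet. Below is a hedged sketch of what such a helper could look like, written as a stand-alone function with a hypothetical name and body inferred only from the call site; it is not the project's actual implementation.

// Clone the raw 16-bit depth (the underlying buffer only lives as long as the frame)
// and build an 8-bit preview in depth_show; pixels outside [nMinDepth, nMaxDepth] are blanked.
cv::Mat capture_sketch(UINT16* pBuffer, int nWidth, int nHeight, cv::Mat& depth_show,
                       USHORT nMinDepth, USHORT nMaxDepth)
{
    cv::Mat depth16(nHeight, nWidth, CV_16UC1, pBuffer);
    cv::Mat re = depth16.clone();

    depth_show.create(nHeight, nWidth, CV_8UC1);
    for (int i = 0; i < nWidth * nHeight; ++i)
    {
        USHORT d = pBuffer[i];
        depth_show.data[i] = (d >= nMinDepth && d <= nMaxDepth) ? static_cast<BYTE>(d % 256) : 0;
    }
    return re;
}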
//----------
void Color::update() {
    CHECK_OPEN

    IColorFrame * frame = NULL;
    IFrameDescription * frameDescription = NULL;
    try {
        //acquire frame
        if (FAILED(this->reader->AcquireLatestFrame(&frame))) {
            return; // we often return here when no new frame is available
        }

        //allocate pixels and texture if we need to
        if (FAILED(frame->get_FrameDescription(&frameDescription))) {
            throw Exception("Failed to get frame description");
        }
        int width, height;
        if (FAILED(frameDescription->get_Width(&width)) || FAILED(frameDescription->get_Height(&height))) {
            throw Exception("Failed to get width and height of frame");
        }
        if (width != this->pixels.getWidth() || height != this->texture.getHeight()) {
            this->pixels.allocate(width, height, OF_IMAGE_COLOR_ALPHA);
            this->texture.allocate(this->pixels);
        }

        //update local assets
        if (FAILED(frame->CopyConvertedFrameDataToArray(this->pixels.size(), this->pixels.getPixels(), ColorImageFormat_Rgba))) {
            throw Exception("Couldn't pull pixel buffer");
        }
        if (this->useTexture) {
            this->texture.loadData(this->pixels);
        }

        //update field of view
        if (FAILED(frameDescription->get_HorizontalFieldOfView(&this->horizontalFieldOfView))) {
            throw Exception("Failed to get horizontal field of view");
        }
        if (FAILED(frameDescription->get_VerticalFieldOfView(&this->verticalFieldOfView))) {
            throw Exception("Failed to get vertical field of view");
        }
        if (FAILED(frameDescription->get_DiagonalFieldOfView(&this->diagonalFieldOfView))) {
            throw Exception("Failed to get diagonal field of view");
        }

        IColorCameraSettings * cameraSettings;
        if (FAILED(frame->get_ColorCameraSettings(&cameraSettings))) {
            throw Exception("Failed to get color camera settings");
        }
        cameraSettings->get_ExposureTime(&this->exposure);
        cameraSettings->get_FrameInterval(&this->frameInterval);
        cameraSettings->get_Gain(&this->gain);
        cameraSettings->get_Gamma(&this->gamma);
    }
    catch (std::exception & e) {
        OFXKINECTFORWINDOWS2_ERROR << e.what();
    }
    SafeRelease(frameDescription);
    SafeRelease(frame);
}
void Microsoft2Grabber::DepthFrameArrived(IDepthFrameReference* pDepthFrameReference)
{
    IDepthFrame* pDepthFrame = NULL;
    HRESULT hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
    //HRESULT hr = pDepthFrameReference->AcquireLatestFrame(&pDepthFrame);
    if (FAILED(hr))
        return;
    //cout << "got a depth frame" << endl;

    INT64 nDepthTime = 0;
    IFrameDescription* pDepthFrameDescription = NULL;
    int nDepthWidth = 0;
    int nDepthHeight = 0;
    UINT nDepthBufferSize = 0;

    // get depth frame data
    hr = pDepthFrame->get_RelativeTime(&nDepthTime);
    if (SUCCEEDED(hr)) {
        hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
    }
    if (SUCCEEDED(hr)) {
        hr = pDepthFrameDescription->get_Width(&nDepthWidth);
    }
    if (SUCCEEDED(hr)) {
        hr = pDepthFrameDescription->get_Height(&nDepthHeight);
    }
    if (SUCCEEDED(hr)) {
        hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &m_pDepthBuffer);
        //WaitForSingleObject(hDepthMutex,INFINITE);
        Mat tmp = Mat(m_depthSize, DEPTH_PIXEL_TYPE, m_pDepthBuffer, Mat::AUTO_STEP);
        MatDepth depth_img = *((MatDepth*)&(tmp.clone()));
        m_depthTime = nDepthTime;
        if (depth_image_signal_->num_slots() > 0) {
            depth_image_signal_->operator()(depth_img);
        }
        if (num_slots<sig_cb_microsoft_point_cloud_rgba>() > 0 || all_data_signal_->num_slots() > 0 || image_depth_image_signal_->num_slots() > 0) {
            //rgb_sync_.add1 (depth_img, m_depthTime);
            imageDepthOnlyImageCallback(depth_img);
        }
        //ReleaseMutex(hDepthMutex);
    }

    SafeRelease(pDepthFrameDescription);
    SafeRelease(pDepthFrame);
}
void BodyIndexMulticaster::ProcessNewBodyIndexFrame(IBodyIndexFrame* frame)
{
    IFrameDescription* frameDescription = NULL;
    frame->get_FrameDescription(&frameDescription);

    int width = 0;
    int height = 0;
    frameDescription->get_Height(&height);
    frameDescription->get_Width(&width);
    UINT nBufferSize = height * width * sizeof(BYTE);

    UINT capacity;
    HRESULT hr = frame->AccessUnderlyingBuffer(&capacity, &pBuffer);
    if (FAILED(hr))
    {
        SafeRelease(frameDescription);
        return;
    }

    if (pBuffer && (width == K4W2_BODY_INDEX_WIDTH) && (height == K4W2_BODY_INDEX_HEIGHT))
    {
        // send previous frame first
        // encode current frame, will be sent in the next cycle
        BYTE* pInput = pBuffer;
        BYTE* pOutput = pScaledBuffer;
        const BYTE* pEnd = pInput + (width * height);
        while (pInput < pEnd)
        {
            BYTE index = *pInput;
            *pOutput = ((signed char)index + 1) * 40;
            ++pOutput;
            ++pInput;
        }
        gstSender.SendFrame((unsigned char*) pScaledBuffer, nBufferSize);
    }

    SafeRelease(frameDescription);
}
void processColor()
{
    if (!device) return;
    if (!m_pColorFrameReader) return;

    IColorFrame* pColorFrame = NULL;
    HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
    if (SUCCEEDED(hr))
    {
        INT64 nTime = 0;
        IFrameDescription* pFrameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        ColorImageFormat imageFormat = ColorImageFormat_None;
        UINT nBufferSize = 0;
        RGBQUAD *src = NULL;

        hr = pColorFrame->get_RelativeTime(&nTime);
        if (SUCCEEDED(hr)) {
            hr = pColorFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr)) {
            hr = pFrameDescription->get_Width(&nWidth);
        }
        if (SUCCEEDED(hr)) {
            hr = pFrameDescription->get_Height(&nHeight);
        }
        if (SUCCEEDED(hr)) {
            hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
        }

        if (imageFormat != ColorImageFormat_Bgra)
        {
            // native format (usually YUY2): convert into our own buffer
            if (!rgb_buffer) {
                rgb_buffer = new RGBQUAD[nWidth * nHeight];
            }
            //post("image format %d", imageFormat);
            //error("not brga");
            nBufferSize = nWidth * nHeight * sizeof(RGBQUAD);
            hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(rgb_buffer), ColorImageFormat_Rgba);
            if (FAILED(hr)) {
                error("failed to convert image");
                SafeRelease(pFrameDescription);
                SafeRelease(pColorFrame);
                return;
            }
            src = rgb_buffer;
        }
        else
        {
            // frame already carries BGRA pixels: read them in place
            hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&src));
        }

        ARGB * dst = (ARGB *)rgb_mat.back;
        int cells = nWidth * nHeight;
        //if (align_depth_to_color) {
        for (int i = 0; i < cells; ++i) {
            dst[i].r = src[i].rgbRed;
            dst[i].g = src[i].rgbGreen;
            dst[i].b = src[i].rgbBlue;
        }
        /*} else {
            // align color to depth:
            //std::fill(dst, dst + cells, RGB(0, 0, 0));
            for (int i = 0; i < cells; ++i) {
                int c = colorCoordinates[i * 2];
                int r = colorCoordinates[i * 2 + 1];
                if (c >= 0 && c < KINECT_DEPTH_WIDTH && r >= 0 && r < KINECT_DEPTH_HEIGHT) {
                    // valid location: depth value:
                    int idx = r*KINECT_DEPTH_WIDTH + c;
                    dst[i].r = src[idx].r;
                    dst[i].g = src[idx].g;
                    dst[i].b = src[idx].b;
                }
            }
        }*/
        new_rgb_data = 1;
        SafeRelease(pFrameDescription);
    }
    SafeRelease(pColorFrame);
}
int _tmain( int argc, _TCHAR* argv[] ) { cv::setUseOptimized( true ); // Sensor IKinectSensor* pSensor; HRESULT hResult = S_OK; hResult = GetDefaultKinectSensor( &pSensor ); if( FAILED( hResult ) ){ std::cerr << "Error : GetDefaultKinectSensor" << std::endl; return -1; } hResult = pSensor->Open( ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::Open()" << std::endl; return -1; } // Source IColorFrameSource* pColorSource; hResult = pSensor->get_ColorFrameSource( &pColorSource ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl; return -1; } IBodyFrameSource* pBodySource; hResult = pSensor->get_BodyFrameSource( &pBodySource ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::get_BodyFrameSource()" << std::endl; return -1; } // Reader IColorFrameReader* pColorReader; hResult = pColorSource->OpenReader( &pColorReader ); if( FAILED( hResult ) ){ std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl; return -1; } IBodyFrameReader* pBodyReader; hResult = pBodySource->OpenReader( &pBodyReader ); if( FAILED( hResult ) ){ std::cerr << "Error : IBodyFrameSource::OpenReader()" << std::endl; return -1; } // Description IFrameDescription* pDescription; hResult = pColorSource->get_FrameDescription( &pDescription ); if( FAILED( hResult ) ){ std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl; return -1; } int width = 0; int height = 0; pDescription->get_Width( &width ); // 1920 pDescription->get_Height( &height ); // 1080 unsigned int bufferSize = width * height * 4 * sizeof( unsigned char ); cv::Mat bufferMat( height, width, CV_8UC4 ); cv::Mat bodyMat( height / 2, width / 2, CV_8UC4 ); cv::namedWindow( "Body" ); // Color Table cv::Vec3b color[BODY_COUNT]; color[0] = cv::Vec3b( 255, 0, 0 ); color[1] = cv::Vec3b( 0, 255, 0 ); color[2] = cv::Vec3b( 0, 0, 255 ); color[3] = cv::Vec3b( 255, 255, 0 ); color[4] = cv::Vec3b( 255, 0, 255 ); color[5] = cv::Vec3b( 0, 255, 255 ); // Coordinate Mapper ICoordinateMapper* pCoordinateMapper; hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl; return -1; } while( 1 ){ // Frame IColorFrame* pColorFrame = nullptr; hResult = pColorReader->AcquireLatestFrame( &pColorFrame ); if( SUCCEEDED( hResult ) ){ hResult = pColorFrame->CopyConvertedFrameDataToArray( bufferSize, reinterpret_cast<BYTE*>( bufferMat.data ), ColorImageFormat::ColorImageFormat_Bgra ); if( SUCCEEDED( hResult ) ){ cv::resize( bufferMat, bodyMat, cv::Size(), 0.5, 0.5 ); } } //SafeRelease( pColorFrame ); IBodyFrame* pBodyFrame = nullptr; hResult = pBodyReader->AcquireLatestFrame( &pBodyFrame ); if( SUCCEEDED( hResult ) ){ IBody* pBody[BODY_COUNT] = { 0 }; hResult = pBodyFrame->GetAndRefreshBodyData( BODY_COUNT, pBody ); if( SUCCEEDED( hResult ) ){ for( int count = 0; count < BODY_COUNT; count++ ){ BOOLEAN bTracked = false; hResult = pBody[count]->get_IsTracked( &bTracked ); if( SUCCEEDED( hResult ) && bTracked ){ Joint joint[JointType::JointType_Count]; hResult = pBody[ count ]->GetJoints( JointType::JointType_Count, joint ); if( SUCCEEDED( hResult ) ){ // Left Hand State HandState leftHandState = HandState::HandState_Unknown; hResult = pBody[count]->get_HandLeftState( &leftHandState ); if( SUCCEEDED( hResult ) ){ ColorSpacePoint colorSpacePoint = { 0 }; hResult = pCoordinateMapper->MapCameraPointToColorSpace( joint[JointType::JointType_HandLeft].Position, &colorSpacePoint ); if( 
SUCCEEDED( hResult ) ){ int x = static_cast<int>( colorSpacePoint.X ); int y = static_cast<int>( colorSpacePoint.Y ); if( ( x >= 0 ) && ( x < width ) && ( y >= 0 ) && ( y < height ) ){ if( leftHandState == HandState::HandState_Open ){ cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 128, 0 ), 5, CV_AA ); } else if( leftHandState == HandState::HandState_Closed ){ cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 0, 128 ), 5, CV_AA ); } else if( leftHandState == HandState::HandState_Lasso ){ cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 128, 128, 0 ), 5, CV_AA ); } } } } // Right Hand State HandState rightHandState = HandState::HandState_Unknown; hResult = pBody[count]->get_HandRightState( &rightHandState ); if( SUCCEEDED( hResult ) ){ ColorSpacePoint colorSpacePoint = { 0 }; hResult = pCoordinateMapper->MapCameraPointToColorSpace( joint[JointType::JointType_HandRight].Position, &colorSpacePoint ); if( SUCCEEDED( hResult ) ){ int x = static_cast<int>( colorSpacePoint.X ); int y = static_cast<int>( colorSpacePoint.Y ); if( ( x >= 0 ) && ( x < width ) && ( y >= 0 ) && ( y < height ) ){ if( rightHandState == HandState::HandState_Open ){ cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 128, 0 ), 5, CV_AA ); } else if( rightHandState == HandState::HandState_Closed ){ cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 0, 0, 128 ), 5, CV_AA ); } else if( rightHandState == HandState::HandState_Lasso ){ cv::circle( bufferMat, cv::Point( x, y ), 75, cv::Scalar( 128, 128, 0 ), 5, CV_AA ); } } } } // Joint for( int type = 0; type < JointType::JointType_Count; type++ ){ ColorSpacePoint colorSpacePoint = { 0 }; pCoordinateMapper->MapCameraPointToColorSpace( joint[type].Position, &colorSpacePoint ); int x = static_cast<int>( colorSpacePoint.X ); int y = static_cast<int>( colorSpacePoint.Y ); if( ( x >= 0 ) && ( x < width ) && ( y >= 0 ) && ( y < height ) ){ cv::circle( bufferMat, cv::Point( x, y ), 5, static_cast< cv::Scalar >( color[count] ), -1, CV_AA ); } } } /*// Activity UINT capacity = 0; DetectionResult detectionResults = DetectionResult::DetectionResult_Unknown; hResult = pBody[count]->GetActivityDetectionResults( capacity, &detectionResults ); if( SUCCEEDED( hResult ) ){ if( detectionResults == DetectionResult::DetectionResult_Yes ){ switch( capacity ){ case Activity::Activity_EyeLeftClosed: std::cout << "Activity_EyeLeftClosed" << std::endl; break; case Activity::Activity_EyeRightClosed: std::cout << "Activity_EyeRightClosed" << std::endl; break; case Activity::Activity_MouthOpen: std::cout << "Activity_MouthOpen" << std::endl; break; case Activity::Activity_MouthMoved: std::cout << "Activity_MouthMoved" << std::endl; break; case Activity::Activity_LookingAway: std::cout << "Activity_LookingAway" << std::endl; break; default: break; } } } else{ std::cerr << "Error : IBody::GetActivityDetectionResults()" << std::endl; }*/ /*// Appearance capacity = 0; detectionResults = DetectionResult::DetectionResult_Unknown; hResult = pBody[count]->GetAppearanceDetectionResults( capacity, &detectionResults ); if( SUCCEEDED( hResult ) ){ if( detectionResults == DetectionResult::DetectionResult_Yes ){ switch( capacity ){ case Appearance::Appearance_WearingGlasses: std::cout << "Appearance_WearingGlasses" << std::endl; break; default: break; } } } else{ std::cerr << "Error : IBody::GetAppearanceDetectionResults()" << std::endl; }*/ /*// Expression capacity = 0; detectionResults = DetectionResult::DetectionResult_Unknown; hResult = 
pBody[count]->GetExpressionDetectionResults( capacity, &detectionResults ); if( SUCCEEDED( hResult ) ){ if( detectionResults == DetectionResult::DetectionResult_Yes ){ switch( capacity ){ case Expression::Expression_Happy: std::cout << "Expression_Happy" << std::endl; break; case Expression::Expression_Neutral: std::cout << "Expression_Neutral" << std::endl; break; default: break; } } } else{ std::cerr << "Error : IBody::GetExpressionDetectionResults()" << std::endl; }*/ // Lean PointF amount; hResult = pBody[count]->get_Lean( &amount ); if( SUCCEEDED( hResult ) ){ std::cout << "amount : " << amount.X << ", " << amount.Y << std::endl; } } } cv::resize( bufferMat, bodyMat, cv::Size(), 0.5, 0.5 ); } for( int count = 0; count < BODY_COUNT; count++ ){ SafeRelease( pBody[count] ); } } //SafeRelease( pBodyFrame ); SafeRelease( pColorFrame ); SafeRelease( pBodyFrame ); cv::imshow( "Body", bodyMat ); if( cv::waitKey( 10 ) == VK_ESCAPE ){ break; } } SafeRelease( pColorSource ); SafeRelease( pBodySource ); SafeRelease( pColorReader ); SafeRelease( pBodyReader ); SafeRelease( pDescription ); SafeRelease( pCoordinateMapper ); if( pSensor ){ pSensor->Close(); } SafeRelease( pSensor ); cv::destroyAllWindows(); return 0; }
int main() { // 1. Sensor related code cout << "Try to get default sensor" << endl; { if (GetDefaultKinectSensor(&pSensor) != S_OK) { cerr << "Get Sensor failed" << endl; return -1; } cout << "Try to open sensor" << endl; if (pSensor->Open() != S_OK) { cerr << "Can't open sensor" << endl; return -1; } } // 2. Color related code cout << "Try to get color source" << endl; { // Get frame source IColorFrameSource* pFrameSource = nullptr; if (pSensor->get_ColorFrameSource(&pFrameSource) != S_OK) { cerr << "Can't get color frame source" << endl; return -1; } // Get frame description cout << "get color frame description" << endl; IFrameDescription* pFrameDescription = nullptr; if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK) { pFrameDescription->get_Width(&iColorWidth); pFrameDescription->get_Height(&iColorHeight); uColorPointNum = iColorWidth * iColorHeight; uColorBufferSize = uColorPointNum * 4 * sizeof(BYTE); pCSPoints = new CameraSpacePoint[uColorPointNum]; pColorBuffer = new BYTE[4 * uColorPointNum]; } pFrameDescription->Release(); pFrameDescription = nullptr; // get frame reader cout << "Try to get color frame reader" << endl; if (pFrameSource->OpenReader(&pColorFrameReader) != S_OK) { cerr << "Can't get color frame reader" << endl; return -1; } // release Frame source cout << "Release frame source" << endl; pFrameSource->Release(); pFrameSource = nullptr; } // 3. Depth related code cout << "Try to get depth source" << endl; { // Get frame source IDepthFrameSource* pFrameSource = nullptr; if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK) { cerr << "Can't get depth frame source" << endl; return -1; } // Get frame description cout << "get depth frame description" << endl; IFrameDescription* pFrameDescription = nullptr; if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK) { int iDepthWidth = 0, iDepthHeight = 0; pFrameDescription->get_Width(&iDepthWidth); pFrameDescription->get_Height(&iDepthHeight); uDepthPointNum = iDepthWidth * iDepthHeight; pDepthBuffer = new UINT16[uDepthPointNum]; } pFrameDescription->Release(); pFrameDescription = nullptr; // get frame reader cout << "Try to get depth frame reader" << endl; if (pFrameSource->OpenReader(&pDepthFrameReader) != S_OK) { cerr << "Can't get depth frame reader" << endl; return -1; } // release Frame source cout << "Release frame source" << endl; pFrameSource->Release(); pFrameSource = nullptr; } // 4. Coordinate Mapper if (pSensor->get_CoordinateMapper(&pCoordinateMapper) != S_OK) { cerr << "get_CoordinateMapper failed" << endl; return -1; } while (1) { idle(); if ((int)pColorBuffer[0] != 0) { capture_point(); } } ExitFunction(); return 0; }
int main() { // name and position windows cvNamedWindow("Color Probabilistic Tracking - Samples", 1); cvMoveWindow("Color Probabilistic Tracking - Samples", 0, 0); cvNamedWindow("Color Probabilistic Tracking - Result", 1); cvMoveWindow("Color Probabilistic Tracking - Result", 1000, 0); //control mouse setMouseCallback("Color Probabilistic Tracking - Samples", onMouse, 0); cv::setUseOptimized(true); // Sensor IKinectSensor* pSensor; HRESULT hResult = S_OK; hResult = GetDefaultKinectSensor(&pSensor); if (FAILED(hResult)) { std::cerr << "Error : GetDefaultKinectSensor" << std::endl; return -1; } hResult = pSensor->Open(); if (FAILED(hResult)) { std::cerr << "Error : IKinectSensor::Open()" << std::endl; return -1; } // Source IColorFrameSource* pColorSource; hResult = pSensor->get_ColorFrameSource(&pColorSource); if (FAILED(hResult)) { std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl; return -1; } IDepthFrameSource* pDepthSource; hResult = pSensor->get_DepthFrameSource(&pDepthSource); if (FAILED(hResult)) { std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl; return -1; } /*IBodyIndexFrameSource* pBodyIndexSource; hResult = pSensor->get_BodyIndexFrameSource(&pBodyIndexSource);*/ // Reader IColorFrameReader* pColorReader; hResult = pColorSource->OpenReader(&pColorReader); if (FAILED(hResult)) { std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl; return -1; } IDepthFrameReader* pDepthReader; hResult = pDepthSource->OpenReader(&pDepthReader); if (FAILED(hResult)) { std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl; return -1; } //IBodyIndexFrameReader* pBodyIndexReader;//saferealease //hResult = pBodyIndexSource->OpenReader(&pBodyIndexReader); // Description IFrameDescription* pColorDescription; hResult = pColorSource->get_FrameDescription(&pColorDescription); if (FAILED(hResult)) { std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl; return -1; } int colorWidth = 0; int colorHeight = 0; pColorDescription->get_Width(&colorWidth); // 1920 pColorDescription->get_Height(&colorHeight); // 1080 unsigned int colorBufferSize = colorWidth * colorHeight * 4 * sizeof(unsigned char); cv::Mat colorBufferMat(colorHeight, colorWidth, CV_8UC4); cv::Mat colorMat(colorHeight / 2, colorWidth / 2, CV_8UC4); cv::namedWindow("Color"); RGBQUAD* m_pDepthRGBX; m_pDepthRGBX = new RGBQUAD[512 * 424];// create heap storage for color pixel data in RGBX format IFrameDescription* pDepthDescription; hResult = pDepthSource->get_FrameDescription(&pDepthDescription); if (FAILED(hResult)) { std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl; return -1; } int depthWidth = 0; int depthHeight = 0; pDepthDescription->get_Width(&depthWidth); // 512 pDepthDescription->get_Height(&depthHeight); // 424 unsigned int depthBufferSize = depthWidth * depthHeight * sizeof(unsigned short); cv::Mat depthBufferMat(depthHeight, depthWidth, CV_16UC1); UINT16* pDepthBuffer = nullptr; cv::Mat depthMat(depthHeight, depthWidth, CV_8UC1); cv::namedWindow("Depth"); //UINT32 nBodyIndexSize = 0; //BYTE* pIndexBuffer = nullptr;//This hasn't been safe realease yet // Coordinate Mapper ICoordinateMapper* pCoordinateMapper; hResult = pSensor->get_CoordinateMapper(&pCoordinateMapper); if (FAILED(hResult)) { std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl; return -1; } cv::Mat coordinateMapperMat(depthHeight, depthWidth, CV_8UC4); cv::namedWindow("CoordinateMapper"); unsigned short minDepth, maxDepth; 
pDepthSource->get_DepthMinReliableDistance(&minDepth); pDepthSource->get_DepthMaxReliableDistance(&maxDepth); while (1) { double t = (double)getTickCount(); // Color Frame IColorFrame* pColorFrame = nullptr; hResult = pColorReader->AcquireLatestFrame(&pColorFrame); if (SUCCEEDED(hResult)) { hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBufferSize, reinterpret_cast<BYTE*>(colorBufferMat.data), ColorImageFormat::ColorImageFormat_Bgra); if (SUCCEEDED(hResult)) { cv::resize(colorBufferMat, colorMat, cv::Size(), 0.5, 0.5); } } //SafeRelease( pColorFrame ); // Depth Frame IDepthFrame* pDepthFrame = nullptr; hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame); if (SUCCEEDED(hResult)) { hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, reinterpret_cast<UINT16**>(&depthBufferMat.data)); } if (SUCCEEDED(hResult)) { hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, &pDepthBuffer); if (SUCCEEDED(hResult)) { RGBQUAD* pRGBX = m_pDepthRGBX; // end pixel is start + width*height - 1 const UINT16* pBufferEnd = pDepthBuffer + (512 * 424); int index = 0; while (pDepthBuffer < pBufferEnd) { USHORT depth = *pDepthBuffer; BYTE intensity = static_cast<BYTE>((depth >= 50) && (depth <= 5000) ? (depth % 256) : 0); pRGBX->rgbRed = intensity; pRGBX->rgbGreen = intensity; pRGBX->rgbBlue = intensity; depthData[index] = depth; ++index; ++pRGBX; ++pDepthBuffer; } } } Mat DepthImage(424, 512, CV_8UC4, m_pDepthRGBX); //SafeRelease( pDepthFrame ); // Mapping (Depth to Color) if (SUCCEEDED(hResult)) { std::vector<ColorSpacePoint> colorSpacePoints(depthWidth * depthHeight); hResult = pCoordinateMapper->MapDepthFrameToColorSpace(depthWidth * depthHeight, reinterpret_cast<UINT16*>(depthBufferMat.data), depthWidth * depthHeight, &colorSpacePoints[0]); if (SUCCEEDED(hResult)) { coordinateMapperMat = cv::Scalar(0, 0, 0, 0); for (int y = 0; y < depthHeight; y++) { for (int x = 0; x < depthWidth; x++) { unsigned int index = y * depthWidth + x; ColorSpacePoint point = colorSpacePoints[index]; int colorX = static_cast<int>(std::floor(point.X + 0.5)); int colorY = static_cast<int>(std::floor(point.Y + 0.5)); unsigned short depth = depthBufferMat.at<unsigned short>(y, x); if ((colorX >= 0) && (colorX < colorWidth) && (colorY >= 0) && (colorY < colorHeight)/* && ( depth >= minDepth ) && ( depth <= maxDepth )*/) { coordinateMapperMat.at<cv::Vec4b>(y, x) = colorBufferMat.at<cv::Vec4b>(colorY, colorX); } } } } } if (SUCCEEDED(hResult)) { //Particle Filter Start from here frame = &(IplImage)coordinateMapperMat;//transorm mat into IplImage if (image == 0) { // initialize variables and allocate buffers image = cvCreateImage(cvGetSize(frame), 8, 4);//every pixel has 4 channels image->origin = frame->origin; result = cvCreateImage(cvGetSize(frame), 8, 4); result->origin = frame->origin; hsv = cvCreateImage(cvGetSize(frame), 8, 3); hue = cvCreateImage(cvGetSize(frame), 8, 1); sat = cvCreateImage(cvGetSize(frame), 8, 1); histimg_ref = cvCreateImage(cvGetSize(frame), 8, 3); histimg_ref->origin = frame->origin; cvZero(histimg_ref); histimg = cvCreateImage(cvGetSize(frame), 8, 3); histimg->origin = frame->origin; cvZero(histimg); bin_w = histimg_ref->width / BIN; bin_h = histimg_ref->height / BIN; data1.sample_t = reinterpret_cast<Region *> (malloc(sizeof(Region)* SAMPLE)); data1.sample_t_1 = reinterpret_cast<Region *> (malloc(sizeof(Region)* SAMPLE)); data1.sample_weight = reinterpret_cast<double *> (malloc(sizeof(double)* SAMPLE)); data1.accum_weight = reinterpret_cast<double *> (malloc(sizeof(double)* 
SAMPLE)); } cvCopy(frame, image); cvCopy(frame, result); cvCvtColor(image, hsv, CV_BGR2HSV);//image ~ hsv if (tracking) { //v_max = 0.0; cvSplit(hsv, hue, 0, 0, 0);//hsv->hue cvSplit(hsv, 0, 0, sat, 0);//hsv-saturation if (selecting) { // get the selected target area //ref_v_max = 0.0; area.width = abs(P_org.x - P_end.x); area.height = abs(P_org.y - P_end.y); area.x = MIN(P_org.x, P_end.x); area.y = MIN(P_org.y, P_end.y); cvZero(histimg_ref); // build reference histogram cvSetImageROI(hue, area); cvSetImageROI(sat, area); // zero reference histogram for (i = 0; i < BIN; i++) for (j = 0; j < BIN; j++) hist_ref[i][j] = 0.0; // calculate reference histogram for (i = 0; i < area.height; i++) { for (j = 0; j < area.width; j++) { im_hue = cvGet2D(hue, i, j); im_sat = cvGet2D(sat, i, j); k = int(im_hue.val[0] / STEP_HUE); h = int(im_sat.val[0] / STEP_SAT); hist_ref[k][h] = hist_ref[k][h] + 1.0; } } // rescale the value of each bin in the reference histogram // and show it as an image for (i = 0; i < BIN; i++) { for (j = 0; j < BIN; j++) { hist_ref[i][j] = hist_ref[i][j] / (area.height*area.width); } } cvResetImageROI(hue); cvResetImageROI(sat); // initialize tracking and samples track_win = area; Initdata(track_win); track_win_last = track_win; // set up flag of tracking selecting = 0; } // sample propagation and weighting track_win = ImProcess(hue, sat, hist_ref, track_win_last); FrameNumber++; track_win_last = track_win; cvZero(histimg); // draw the one RED bounding box cvRectangle(image, cvPoint(track_win.x, track_win.y), cvPoint(track_win.x + track_win.width, track_win.y + track_win.height), CV_RGB(255, 0, 0), 2); printf("width = %d, height = %d\n", track_win.width, track_win.height); //save certian images if (FrameNumber % 10 == 0) { if (FrameNumber / 10 == 1) cvSaveImage("./imageout1.jpg", image); if (FrameNumber / 10 == 2) cvSaveImage("./imageout2.jpg", image); if (FrameNumber / 10 == 3) cvSaveImage("./imageout3.jpg", image); if (FrameNumber / 10 == 4) cvSaveImage("./imageout4.jpg", image); if (FrameNumber / 10 == 5) cvSaveImage("./imageout5.jpg", image); if (FrameNumber / 10 == 6) cvSaveImage("./imageout6.jpg", image); if (FrameNumber / 10 == 7) cvSaveImage("./imageout7.jpg", image); if (FrameNumber / 10 == 8) cvSaveImage("./imageout8.jpg", image); } //save certian images if (FrameNumber % 10 == 0) { if (FrameNumber / 10 == 1) cvSaveImage("./resultout1.jpg", result); if (FrameNumber / 10 == 2) cvSaveImage("./resultout2.jpg", result); if (FrameNumber / 10 == 3) cvSaveImage("./resultout3.jpg", result); if (FrameNumber / 10 == 4) cvSaveImage("./resultout4.jpg", result); if (FrameNumber / 10 == 5) cvSaveImage("./resultout5.jpg", result); if (FrameNumber / 10 == 6) cvSaveImage("./resultout6.jpg", result); if (FrameNumber / 10 == 7) cvSaveImage("./resultout7.jpg", result); if (FrameNumber / 10 == 8) cvSaveImage("./resultout8.jpg", result); } //draw a same bounding box in DepthImage rectangle(DepthImage, track_win, CV_RGB(255, 0, 0), 2); //******************************************************Geodesic Distance*************************************************************************************** //Point propagation and weight if (PointTrack == 1) { if (PointSelect == 1)//only visit once { // initialize tracking and samples for (int i = 0; i < SAMPLE; i++) { point[i].x_1 = P_track.x; point[i].y_1 = P_track.y; point[i].z_1 = depthData[P_track.x + P_track.y * 512]; point[i].x_1_prime = 0.0; point[i].y_1_prime = 0.0; } refeFlag = 1; p_win = P_track; //p_transtart is the start point of the surface 
mesh P_transtart.x = track_win.x; P_transtart.y = track_win.y; PointSelect = 0; } //construct the graph(mesh) ConstructMesh(depthData, adjlist, P_transtart,track_win.width,track_win.height); //calculate shortest path vector<int> vertexDist; vertexDist.resize(track_win.width*track_win.height); ShortestPath(P_extre, adjlist, vertexDist); cvCircle(image, P_extre, 3, CV_RGB(0, 255, 0),1); //generate the refernce distance for comparing if (refeFlag > 0) { cvCircle(image, p_win, 3, CV_RGB(0, 0, 255), 1); int track = abs(P_transtart.x - P_track.x) + track_win.width * abs(P_transtart.y - P_track.y); referDistance = vertexDist[track]; refeFlag = 0; } //samples propagation PredictPoint(p_win); //get geodesic distance for each sample. //find the sample which have most similar distance to the refernce distance float Dist, AbsDist, WinDist, minAbsDist = 10000; int number,sum=0,count=0; for (int i = 0; i < SAMPLE; i++) { int t = abs(P_transtart.x - point[i].x) + track_win.width * abs(P_transtart.y - point[i].y); if (adjlist[t].v == false) { count++; continue; } int refer = abs(point[i].x - P_transtart.x) + track_win.width * abs(point[i].y - P_transtart.y); Dist = vertexDist[refer]; AbsDist = fabs(referDistance - Dist); //point[i].SampleWeight = AbsDist; //point[i].AccumWeight = sum; //sum = sum + AbsDist; if (AbsDist < minAbsDist) { AbsDist = Dist; number = i; WinDist = Dist; } } //for (int i = 0; i < SAMPLE; i++) //{ // point[i].SampleWeight = point[i].SampleWeight / sum; // point[i].AccumWeight = point[i].AccumWeight / sum; //} printf("referDist = %f, winDist = %f, discardPoints = %d\n", referDistance, WinDist,count); p_win_last = p_win; p_win.x = point[number].x; p_win.y = point[number].y; //samples re-location float deltaX = p_win.x - p_win_last.x; float deltaY = p_win.y - p_win_last.y; UpdatePoint(number, deltaX, deltaY); cvCircle(image, p_win, 5, CV_RGB(0, 0, 0)); } // //**************************************************************************************************************************************** } // if still selecting a target, show the RED selected area else cvRectangle(image, P_org, P_end, CV_RGB(255, 0, 0), 1); } imshow("Depth", DepthImage); cvShowImage("Color Probabilistic Tracking - Samples", image); cvShowImage("Color Probabilistic Tracking - Result", result); SafeRelease(pColorFrame); SafeRelease(pDepthFrame); //SafeRelease(pBodyIndexFrame); cv::imshow("Color", colorMat); cv::imshow("Depth", DepthImage); cv::imshow("CoordinateMapper", coordinateMapperMat); //END OF THE TIME POINT t = ((double)getTickCount() - t) / getTickFrequency(); t = 1 / t; //cout << "FPS:" << t << "FrameNumber\n" << FrameNumebr<< endl; printf("FPS:%f Frame:%d \n\n", t, FrameNumber); if (cv::waitKey(30) == VK_ESCAPE) { break; } } SafeRelease(pColorSource); SafeRelease(pDepthSource); //SafeRelease(pBodyIndexSource); SafeRelease(pColorReader); SafeRelease(pDepthReader); //SafeRelease(pBodyIndexReader); SafeRelease(pColorDescription); SafeRelease(pDepthDescription); SafeRelease(pCoordinateMapper); if (pSensor) { pSensor->Close(); } SafeRelease(pSensor); cv::destroyAllWindows(); cvReleaseImage(&image); cvReleaseImage(&result); cvReleaseImage(&histimg_ref); cvReleaseImage(&histimg); cvReleaseImage(&hsv); cvReleaseImage(&hue); cvReleaseImage(&sat); cvDestroyWindow("Color Probabilistic Tracking - Samples"); cvDestroyWindow("Color Probabilistic Tracking - Result"); return 0; }
void capture(Image::Ptr& pImage) { HRESULT hr; if (m_pMultiSourceFrameReader==nullptr) { camera->getContext().error("CameraKinectDevice::capture: m_pMultiSourceFrameReader is nullptr\n"); // this is bad news - perhaps throw? return; // @@@ } IMultiSourceFrame* pMultiSourceFrame = nullptr; IDepthFrame* pDepthFrame = nullptr; IColorFrame* pColorFrame = nullptr; const golem::MSecTmU32 waitStep = 1; golem::MSecTmU32 timeWaited = 0; golem::Sleep timer; while (FAILED(hr = m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame))) { // this is in CameraOpenNI, but suspect may be causing problem here // if (camera->isTerminating()) return; timer.msleep(waitStep); timeWaited += waitStep; if (timeWaited >= timeout) { camera->getContext().error("CameraKinectDevice::capture: failed to acquire frame within %d ms\n", timeout); // keep going - don't return with nothing; reset stopwatch @@@ timeWaited = 0; } } const golem::SecTmReal systemTime1 = camera->getContext().getTimer().elapsed(); if (SUCCEEDED(hr)) { IDepthFrameReference* pDepthFrameReference = nullptr; hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference); if (SUCCEEDED(hr)) { hr = pDepthFrameReference->AcquireFrame(&pDepthFrame); } RELEASE_PTR(pDepthFrameReference); } if (SUCCEEDED(hr)) { IColorFrameReference* pColorFrameReference = nullptr; hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference); if (SUCCEEDED(hr)) { hr = pColorFrameReference->AcquireFrame(&pColorFrame); } RELEASE_PTR(pColorFrameReference); } if (SUCCEEDED(hr)) { INT64 nDepthTime = 0; IFrameDescription* pDepthFrameDescription = nullptr; int nDepthWidth = 0; int nDepthHeight = 0; UINT nDepthBufferSize = 0; UINT16 *pDepthBuffer = nullptr; IFrameDescription* pColorFrameDescription = nullptr; int nColorWidth = 0; int nColorHeight = 0; ColorImageFormat imageFormat = ColorImageFormat_None; UINT nColorBufferSize = 0; RGBQUAD *pColorBuffer = nullptr; // get depth frame data hr = pDepthFrame->get_RelativeTime(&nDepthTime); if (SUCCEEDED(hr)) hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription); if (SUCCEEDED(hr)) hr = pDepthFrameDescription->get_Width(&nDepthWidth); if (SUCCEEDED(hr)) hr = pDepthFrameDescription->get_Height(&nDepthHeight); if (SUCCEEDED(hr)) hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer); // get color frame data if (SUCCEEDED(hr)) hr = pColorFrame->get_FrameDescription(&pColorFrameDescription); if (SUCCEEDED(hr)) hr = pColorFrameDescription->get_Width(&nColorWidth); if (SUCCEEDED(hr)) hr = pColorFrameDescription->get_Height(&nColorHeight); if (SUCCEEDED(hr)) hr = pColorFrame->get_RawColorImageFormat(&imageFormat); if (SUCCEEDED(hr)) { if (imageFormat == ColorImageFormat_Bgra) { hr = pColorFrame->AccessRawUnderlyingBuffer(&nColorBufferSize, reinterpret_cast<BYTE**>(&pColorBuffer)); } else if (m_pColorRGBX) { pColorBuffer = m_pColorRGBX; nColorBufferSize = nColorWidth * nColorHeight * sizeof(RGBQUAD); hr = pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, reinterpret_cast<BYTE*>(pColorBuffer), ColorImageFormat_Bgra); } else { hr = E_FAIL; } } cv::Mat colorFrame; colorFrame.create(cColorHeight, cColorWidth, CV_8UC4); colorFrame.setTo(cv::Scalar(0, 0, 0, 0)); //copying color buffer into cv::mat memcpy(colorFrame.data, pColorBuffer, (nColorWidth * nColorHeight) * 4); //originally kinect has mirrored image cv::flip(colorFrame, colorFrame, 1); //converstion to pImage which is BGR cv::Mat colorFrameT; cv::cvtColor(colorFrame, colorFrameT, CV_BGRA2BGR); if (mode & 
CameraKinect::MODE_COLOUR) { // initialize buffer if (pImage == nullptr) pImage.reset(new Image()); pImage->reserve((int)1920, (int)1080); //printf("Before clonning frame\n"); IplImage * img = cvCloneImage(&(IplImage)colorFrameT); cvCopy(img, pImage->image); cvReleaseImage(&img); //printf("After clonning frame\n"); const golem::SecTmReal systemTime2 = camera->getContext().getTimer().elapsed(); //seting timestamp pImage->timeStamp = REAL_HALF*(systemTime1 + systemTime2); } //camera->getContext().debug("After colour\n"); //needed for retain information of the begining of the buffer const UINT16* pBufferStart = pDepthBuffer; const UINT16* pBufferEnd = pDepthBuffer + (nDepthWidth * nDepthHeight); //copying depth buffer into cv::Mat long depthSizeP = (pBufferEnd - pBufferStart) * 2; cv::Mat depthFrame; depthFrame.create(cDepthHeight, cDepthWidth, CV_16UC1); depthFrame.setTo(cv::Scalar(0)); memcpy(depthFrame.data, pBufferStart, depthSizeP); //originally kinect has mirrored image cv::flip(depthFrame, depthFrame, 1); //camera->getContext().debug("After getting depth data\n"); //depth mode color data mapped into camera space (remember high resolution 1920x1080) if (mode & CameraKinect::MODE_DEPTH && (std::stoi(this->property.format.c_str()) == 101)) { //camera->getContext().debug("In depth mode\n"); if (pImage == nullptr) pImage.reset(new Image()); pImage->reserve((int)1920, (int)1080); pImage->cloud.reset(new PointSeq((uint32_t)property.width, (uint32_t)property.height)); //Obtaining the pointcloud hasImageStream = false; float missingPoint = std::numeric_limits<float>::quiet_NaN(); hr = m_pCoordinateMapper->MapColorFrameToCameraSpace(nDepthWidth * nDepthHeight, (UINT16*)pDepthBuffer, cColorHeight * cColorWidth, m_pCameraCoordinatesColor); golem::Point* pOutWidth = &pImage->cloud->front(); CameraSpacePoint* pCamCWidth = m_pCameraCoordinatesColor; for (int y = 0; y < cColorHeight; ++y) { golem::Point* pOut = pOutWidth; CameraSpacePoint* pCamC = pCamCWidth; for (int x = 0; x < cColorWidth; ++x, pCamC++, ++pOut) { cv::Vec4b color; int abc = pCamC->Z; if (abc != 0) { pOut->x = pCamC->X; pOut->y = pCamC->Y; pOut->z = pCamC->Z; } else { pOut->x = pOut->y = pOut->z = missingPoint; //printf("Getting to this point4\n"); } pOut->normal_x = pOut->normal_y = pOut->normal_z = golem::numeric_const<float>::ZERO; color = colorFrame.at<cv::Vec4b>(y, x); if (hasImageStream) { pOut->b = color[0]; pOut->g = color[1]; pOut->r = color[2]; pOut->a = colour._rgba.a; } else { pOut->r = colour._rgba.r; pOut->g = colour._rgba.g; pOut->b = colour._rgba.b; pOut->a = colour._rgba.a; } } colorFrame += cColorWidth; pCamCWidth += cColorWidth; pOutWidth += cColorWidth; } } //depth mode depth data mapped into camera space if (mode & CameraKinect::MODE_DEPTH && (std::stoi(this->property.format.c_str()) == 102)) { if (pImage == nullptr) pImage.reset(new Image()); //camera->getContext().debug("In depth mode\n"); pImage->cloud.reset(new PointSeq((uint32_t)property.width, (uint32_t)property.height)); //Obtaining the pointcloud hasImageStream = false; float missingPoint = std::numeric_limits<float>::quiet_NaN(); hr = m_pCoordinateMapper->MapDepthFrameToCameraSpace(nDepthWidth * nDepthHeight, (UINT16*)pDepthBuffer, cDepthHeight * cDepthWidth, m_pCameraCoordinatesDepth); golem::Point* pOutWidth = &pImage->cloud->front(); CameraSpacePoint* pCamDWidth = m_pCameraCoordinatesDepth; for (int y = 0; y < cDepthHeight; ++y) { golem::Point* pOut = pOutWidth; CameraSpacePoint* pCamC = pCamDWidth; for (int x = 0; x < cDepthWidth; ++x, pCamC++, 
++pOut) { cv::Vec4b color; //int abc = pCamC->Z; if (pCamC->Z != 0) { pOut->x = pCamC->X; pOut->y = pCamC->Y; pOut->z = pCamC->Z; } else { pOut->x = missingPoint; pOut->y = missingPoint; pOut->z = missingPoint; // //printf("Getting to this point4\n"); } pOut->normal_x = pOut->normal_y = pOut->normal_z = golem::numeric_const<float>::ZERO; /*color = colorframe.at<cv::vec4b>(y, x); if (hasimagestream) { pout->b = color[0]; pout->g = color[1]; pout->r = color[2]; pout->a = colour._rgba.a; }*/ //else { pOut->r = colour._rgba.r; pOut->g = colour._rgba.g; pOut->b = colour._rgba.b; pOut->a = colour._rgba.a; //} } //colorFrame += cDepthWidth; pCamDWidth += cDepthWidth; pOutWidth += cDepthWidth; } golem::Mat33 aMatrix; aMatrix.m11 = golem::Real(-1); aMatrix.m12 = golem::Real(0); aMatrix.m13 = golem::Real(0); aMatrix.m21 = golem::Real(0); aMatrix.m22 = golem::Real(-1); aMatrix.m23 = golem::Real(0); aMatrix.m31 = golem::Real(0); aMatrix.m32 = golem::Real(0); aMatrix.m33 = golem::Real(1); golem::Vec3 aVector; aVector.v1 = 0; aVector.v2 = 0; aVector.v3 = 0; golem::Mat34 aMatrix34; aMatrix34.R = aMatrix; aMatrix34.p = aVector; Cloud::PointSeqPtr pCloud = pImage->cloud; const Mat34 frame = Cloud::getSensorFrame(*pCloud); Cloud::transform(aMatrix34, *pCloud, *pCloud); Cloud::setSensorFrame(frame, *pCloud); // don't change the camera frame !!!!!!!!!!!!!!!!!!!!!!!!!!!! const golem::SecTmReal systemTime2 = camera->getContext().getTimer().elapsed(); //seting timestamp pImage->timeStamp = REAL_HALF*(systemTime1 + systemTime2); } RELEASE_PTR(pDepthFrameDescription); RELEASE_PTR(pColorFrameDescription); } RELEASE_PTR(pDepthFrame); RELEASE_PTR(pColorFrame); RELEASE_PTR(pMultiSourceFrame); }
int _tmain( int argc, _TCHAR* argv[] ) { cv::setUseOptimized( true ); // Sensor IKinectSensor* pSensor; HRESULT hResult = S_OK; hResult = GetDefaultKinectSensor( &pSensor ); if( FAILED( hResult ) ){ std::cerr << "Error : GetDefaultKinectSensor" << std::endl; return -1; } hResult = pSensor->Open(); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::Open()" << std::endl; return -1; } // Source IDepthFrameSource* pDepthSource; hResult = pSensor->get_DepthFrameSource( &pDepthSource ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl; return -1; } // Reader IDepthFrameReader* pDepthReader; hResult = pDepthSource->OpenReader( &pDepthReader ); if( FAILED( hResult ) ){ std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl; return -1; } // Description IFrameDescription* pDescription; hResult = pDepthSource->get_FrameDescription( &pDescription ); if( FAILED( hResult ) ){ std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl; return -1; } int width = 0; int height = 0; pDescription->get_Width( &width ); // 512 pDescription->get_Height( &height ); // 424 unsigned int bufferSize = width * height * sizeof( unsigned short ); cv::Mat bufferMat( height, width, CV_16UC1 ); cv::Mat depthMat( height, width, CV_8UC1 ); cv::namedWindow( "Depth" ); // Coordinate Mapper ICoordinateMapper* pCoordinateMapper; hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl; return -1; } // Create Reconstruction NUI_FUSION_RECONSTRUCTION_PARAMETERS reconstructionParameter; reconstructionParameter.voxelsPerMeter = 256; reconstructionParameter.voxelCountX = 512; reconstructionParameter.voxelCountY = 384; reconstructionParameter.voxelCountZ = 512; Matrix4 worldToCameraTransform; SetIdentityMatrix( worldToCameraTransform ); INuiFusionReconstruction* pReconstruction; hResult = NuiFusionCreateReconstruction( &reconstructionParameter, NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, -1, &worldToCameraTransform, &pReconstruction ); if( FAILED( hResult ) ){ std::cerr << "Error : NuiFusionCreateReconstruction()" << std::endl; return -1; } // Create Image Frame // Depth NUI_FUSION_IMAGE_FRAME* pDepthFloatImageFrame; hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pDepthFloatImageFrame ); if( FAILED( hResult ) ){ std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl; return -1; } // SmoothDepth NUI_FUSION_IMAGE_FRAME* pSmoothDepthFloatImageFrame; hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pSmoothDepthFloatImageFrame ); if( FAILED( hResult ) ){ std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl; return -1; } // Point Cloud NUI_FUSION_IMAGE_FRAME* pPointCloudImageFrame; hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, width, height, nullptr, &pPointCloudImageFrame ); if( FAILED( hResult ) ){ std::cerr << "Error : NuiFusionCreateImageFrame( POINT_CLOUD )" << std::endl; return -1; } // Surface NUI_FUSION_IMAGE_FRAME* pSurfaceImageFrame; hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pSurfaceImageFrame ); if( FAILED( hResult ) ){ std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl; return -1; } // Normal NUI_FUSION_IMAGE_FRAME* pNormalImageFrame; hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, 
nullptr, &pNormalImageFrame ); if( FAILED( hResult ) ){ std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl; return -1; } cv::namedWindow( "Surface" ); cv::namedWindow( "Normal" ); while( 1 ){ // Frame IDepthFrame* pDepthFrame = nullptr; hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame ); if( SUCCEEDED( hResult ) ){ hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) ); if( SUCCEEDED( hResult ) ){ bufferMat.convertTo( depthMat, CV_8U, -255.0f / 8000.0f, 255.0f ); hResult = pReconstruction->DepthToDepthFloatFrame( reinterpret_cast<UINT16*>( bufferMat.data ), width * height * sizeof( UINT16 ), pDepthFloatImageFrame, NUI_FUSION_DEFAULT_MINIMUM_DEPTH/* 0.5[m] */, NUI_FUSION_DEFAULT_MAXIMUM_DEPTH/* 8.0[m] */, true ); if( FAILED( hResult ) ){ std::cerr << "Error :INuiFusionReconstruction::DepthToDepthFloatFrame()" << std::endl; return -1; } } } SafeRelease( pDepthFrame ); // Smooting Depth Image Frame hResult = pReconstruction->SmoothDepthFloatFrame( pDepthFloatImageFrame, pSmoothDepthFloatImageFrame, 1, 0.04f ); if( FAILED( hResult ) ){ std::cerr << "Error :INuiFusionReconstruction::SmoothDepthFloatFrame" << std::endl; return -1; } // Reconstruction Process pReconstruction->GetCurrentWorldToCameraTransform( &worldToCameraTransform ); hResult = pReconstruction->ProcessFrame( pSmoothDepthFloatImageFrame, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT, nullptr, &worldToCameraTransform ); if( FAILED( hResult ) ){ static int errorCount = 0; errorCount++; if( errorCount >= 100 ) { errorCount = 0; ResetReconstruction( pReconstruction, &worldToCameraTransform ); } } // Calculate Point Cloud hResult = pReconstruction->CalculatePointCloud( pPointCloudImageFrame, &worldToCameraTransform ); if( FAILED( hResult ) ){ std::cerr << "Error : CalculatePointCloud" << std::endl; return -1; } // Shading Point Clouid Matrix4 worldToBGRTransform = { 0.0f }; worldToBGRTransform.M11 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountX; worldToBGRTransform.M22 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountY; worldToBGRTransform.M33 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountZ; worldToBGRTransform.M41 = 0.5f; worldToBGRTransform.M42 = 0.5f; worldToBGRTransform.M43 = 0.0f; worldToBGRTransform.M44 = 1.0f; hResult = NuiFusionShadePointCloud( pPointCloudImageFrame, &worldToCameraTransform, &worldToBGRTransform, pSurfaceImageFrame, pNormalImageFrame ); if( FAILED( hResult ) ){ std::cerr << "Error : NuiFusionShadePointCloud" << std::endl; return -1; } cv::Mat surfaceMat( height, width, CV_8UC4, pSurfaceImageFrame->pFrameBuffer->pBits ); cv::Mat normalMat( height, width, CV_8UC4, pNormalImageFrame->pFrameBuffer->pBits ); cv::imshow( "Depth", depthMat ); cv::imshow( "Surface", surfaceMat ); cv::imshow( "Normal", normalMat ); int key = cv::waitKey( 30 ); if( key == VK_ESCAPE ){ break; } else if( key == 'r' ){ ResetReconstruction( pReconstruction, &worldToCameraTransform ); } } SafeRelease( pDepthSource ); SafeRelease( pDepthReader ); SafeRelease( pDescription ); SafeRelease( pCoordinateMapper ); SafeRelease( pReconstruction ); NuiFusionReleaseImageFrame( pDepthFloatImageFrame ); NuiFusionReleaseImageFrame( pSmoothDepthFloatImageFrame ); NuiFusionReleaseImageFrame( pPointCloudImageFrame ); NuiFusionReleaseImageFrame( pSurfaceImageFrame ); NuiFusionReleaseImageFrame( pNormalImageFrame ); if( pSensor ){ pSensor->Close(); } 
SafeRelease( pSensor ); cv::destroyAllWindows(); return 0; }
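The Fusion sample above relies on two helpers it never defines, SetIdentityMatrix() and ResetReconstruction(). A minimal sketch of what they might look like, inferred from the call sites and the usual Kinect Fusion reset pattern (these bodies are assumptions, not the original source):

void SetIdentityMatrix( Matrix4& mat )
{
    // zero the matrix, then put 1 on the diagonal
    mat = Matrix4();
    mat.M11 = mat.M22 = mat.M33 = mat.M44 = 1.0f;
}

void ResetReconstruction( INuiFusionReconstruction* pReconstruction, Matrix4* pWorldToCameraTransform )
{
    // restart integration from an identity camera pose; nullptr keeps the default world-to-volume transform
    SetIdentityMatrix( *pWorldToCameraTransform );
    HRESULT hr = pReconstruction->ResetReconstruction( pWorldToCameraTransform, nullptr );
    if( FAILED( hr ) ){
        std::cerr << "Error : INuiFusionReconstruction::ResetReconstruction()" << std::endl;
    }
}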
int main(int argc, char** argv) { // 1a. Get default Sensor std::cout << "Try to get default sensor" << std::endl; IKinectSensor* pSensor = nullptr; if (GetDefaultKinectSensor(&pSensor) != S_OK) { cerr << "Get Sensor failed" << std::endl; return -1; } // 1b. Open sensor std::cout << "Try to open sensor" << std::endl; if (pSensor->Open() != S_OK) { cerr << "Can't open sensor" << std::endl; return -1; } // 2. Color Related code IColorFrameReader* pColorFrameReader = nullptr; cv::Mat mColorImg; UINT uBufferSize = 0; { // 2a. Get color frame source std::cout << "Try to get color source" << std::endl; IColorFrameSource* pFrameSource = nullptr; if (pSensor->get_ColorFrameSource(&pFrameSource) != S_OK) { cerr << "Can't get color frame source" << std::endl; return -1; } // 2b. Get frame description std::cout << "get color frame description" << std::endl; int iWidth = 0; int iHeight = 0; IFrameDescription* pFrameDescription = nullptr; if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK) { pFrameDescription->get_Width(&iWidth); pFrameDescription->get_Height(&iHeight); } pFrameDescription->Release(); pFrameDescription = nullptr; // 2c. get frame reader std::cout << "Try to get color frame reader" << std::endl; if (pFrameSource->OpenReader(&pColorFrameReader) != S_OK) { cerr << "Can't get color frame reader" << std::endl; return -1; } // 2d. release Frame source std::cout << "Release frame source" << std::endl; pFrameSource->Release(); pFrameSource = nullptr; // Prepare OpenCV data mColorImg = cv::Mat(iHeight, iWidth, CV_8UC4); uBufferSize = iHeight * iWidth * 4 * sizeof(BYTE); } // 3. Body related code IBodyFrameReader* pBodyFrameReader = nullptr; IBody** aBodyData = nullptr; INT32 iBodyCount = 0; { // 3a. Get frame source std::cout << "Try to get body source" << std::endl; IBodyFrameSource* pFrameSource = nullptr; if (pSensor->get_BodyFrameSource(&pFrameSource) != S_OK) { cerr << "Can't get body frame source" << std::endl; return -1; } // 3b. Get the number of body if (pFrameSource->get_BodyCount(&iBodyCount) != S_OK) { cerr << "Can't get body count" << std::endl; return -1; } std::cout << " > Can trace " << iBodyCount << " bodies" << std::endl; aBodyData = new IBody*[iBodyCount]; for (int i = 0; i < iBodyCount; ++i) aBodyData[i] = nullptr; // 3c. get frame reader std::cout << "Try to get body frame reader" << std::endl; if (pFrameSource->OpenReader(&pBodyFrameReader) != S_OK) { cerr << "Can't get body frame reader" << std::endl; return -1; } // 3d. release Frame source std::cout << "Release frame source" << std::endl; pFrameSource->Release(); pFrameSource = nullptr; } // 4. 
get CoordinateMapper ICoordinateMapper* pCoordinateMapper = nullptr; if (pSensor->get_CoordinateMapper(&pCoordinateMapper) != S_OK) { std::cout << "Can't get coordinate mapper" << std::endl; return -1; } // Enter main loop cv::namedWindow("Body Image"); // Debug:output the velocity of joints ofstream current_average_velocityTXT("current_average_velocity.txt"); ofstream average_velocityTXT("average_velocity.txt"); int frame_count = 0; int frame_count_for_standby = 0; float positionX0[25] = {0}; float positionX1[25] = {0}; float positionY0[25] = { 0 }; float positionY1[25] = { 0 }; float positionZ0[25] = { 0 }; float positionZ1[25] = { 0 }; float velocityX[25] = { 0 }; float velocityY[25] = { 0 }; float velocityZ[25] = { 0 }; float current_velocity[25] = { 0 }; float velocityee[8] = { 0 }; float current_total_velocity = 0; float current_average_velocity = 0; float total_velocity = 0; float average_velocity = 0; while (true) { // 4a. Get last frame IColorFrame* pColorFrame = nullptr; if (pColorFrameReader->AcquireLatestFrame(&pColorFrame) == S_OK) { // 4c. Copy to OpenCV image if (pColorFrame->CopyConvertedFrameDataToArray(uBufferSize, mColorImg.data, ColorImageFormat_Bgra) != S_OK) { cerr << "Data copy error" << endl; } // 4e. release frame pColorFrame->Release(); } cv::Mat mImg = mColorImg.clone(); // 4b. Get body data IBodyFrame* pBodyFrame = nullptr; if (pBodyFrameReader->AcquireLatestFrame(&pBodyFrame) == S_OK) { // 4b. get Body data if (pBodyFrame->GetAndRefreshBodyData(iBodyCount, aBodyData) == S_OK) { // 4c. for each body for (int i = 0; i < iBodyCount; ++i) { IBody* pBody = aBodyData[i]; // check if is tracked BOOLEAN bTracked = false; if ((pBody->get_IsTracked(&bTracked) == S_OK) && bTracked) { // get joint position Joint aJoints[JointType::JointType_Count]; if (pBody->GetJoints(JointType::JointType_Count, aJoints) == S_OK) { DrawLine(mImg, aJoints[JointType_SpineBase], aJoints[JointType_SpineMid], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_SpineMid], aJoints[JointType_SpineShoulder], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_SpineShoulder], aJoints[JointType_Neck], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_Neck], aJoints[JointType_Head], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_SpineShoulder], aJoints[JointType_ShoulderLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_ShoulderLeft], aJoints[JointType_ElbowLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_ElbowLeft], aJoints[JointType_WristLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_WristLeft], aJoints[JointType_HandLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_HandLeft], aJoints[JointType_HandTipLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_HandLeft], aJoints[JointType_ThumbLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_SpineShoulder], aJoints[JointType_ShoulderRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_ShoulderRight], aJoints[JointType_ElbowRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_ElbowRight], aJoints[JointType_WristRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_WristRight], aJoints[JointType_HandRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_HandRight], aJoints[JointType_HandTipRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_HandRight], aJoints[JointType_ThumbRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_SpineBase], aJoints[JointType_HipLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_HipLeft], 
aJoints[JointType_KneeLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_KneeLeft], aJoints[JointType_AnkleLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_AnkleLeft], aJoints[JointType_FootLeft], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_SpineBase], aJoints[JointType_HipRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_HipRight], aJoints[JointType_KneeRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_KneeRight], aJoints[JointType_AnkleRight], pCoordinateMapper); DrawLine(mImg, aJoints[JointType_AnkleRight], aJoints[JointType_FootRight], pCoordinateMapper); } // Debug: print the current frame number std::cout << "frame " << ++frame_count << std::endl; // shift the 8-frame velocity history back by one (newest value is stored at index 0 below) and sum the previous samples for (int j = 7; j >= 1; j--) { velocityee[j] = velocityee[j-1]; total_velocity += velocityee[j]; } average_velocity = total_velocity / 8.0; if (average_velocity <= 0.0015) { // determine if the person is still if (frame_count_for_standby == 0) { PlaySound(TEXT("Alarm02.wav"), NULL, SND_FILENAME); std::cout << "Start capturing points!" << std::endl; } // count the number of frames whose velocity is below the threshold frame_count_for_standby++; if (frame_count_for_standby >= 5) { frame_count_for_standby = 0; } } // Debug: output the average velocity average_velocityTXT << frame_count << " " << average_velocity << std::endl; total_velocity = 0; // Compute the per-joint velocities for this frame int available_joints = 0; for (int i = 0; i < 25; i++) { // X positionX1[i] = positionX0[i]; positionX0[i] = aJoints[i].Position.X; velocityX[i] = (positionX1[i] - positionX0[i]) * (positionX1[i] - positionX0[i]); // Y positionY1[i] = positionY0[i]; positionY0[i] = aJoints[i].Position.Y; velocityY[i] = (positionY1[i] - positionY0[i]) * (positionY1[i] - positionY0[i]); // Z positionZ1[i] = positionZ0[i]; positionZ0[i] = aJoints[i].Position.Z; velocityZ[i] = (positionZ1[i] - positionZ0[i]) * (positionZ1[i] - positionZ0[i]); current_velocity[i] = sqrtf(velocityX[i] + velocityY[i] + velocityZ[i]); // exclude discontinuous (jumpy) joint velocities if (current_velocity[i] < 0.01) { current_total_velocity += current_velocity[i]; available_joints++; } } // If no joint is available, keep the velocity from the last frame if (available_joints != 0) { current_average_velocity = current_total_velocity / available_joints; } velocityee[0] = current_average_velocity; // Debug: output the current average velocity current_average_velocityTXT << frame_count << " " << current_average_velocity << std::endl; current_total_velocity = 0; } } } else { cerr << "Can't read body data" << endl; } // 4e. release frame pBodyFrame->Release(); } // show image cv::imshow("Body Image", mImg); // 4c. check keyboard input if (cv::waitKey(30) == VK_ESCAPE) { break; } } // 3. delete body data array delete[] aBodyData; // 3. release frame reader std::cout << "Release body frame reader" << std::endl; pBodyFrameReader->Release(); pBodyFrameReader = nullptr; // 2. release color frame reader std::cout << "Release color frame reader" << std::endl; pColorFrameReader->Release(); pColorFrameReader = nullptr; // 1c. Close Sensor std::cout << "close sensor" << std::endl; pSensor->Close(); // 1d. Release Sensor std::cout << "Release sensor" << std::endl; pSensor->Release(); pSensor = nullptr; return 0; }
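DrawLine() is not part of this listing. A minimal sketch of one way to implement it, assuming the usual pattern of mapping the two camera-space joints into the 1920x1080 color image with ICoordinateMapper (the drawing color and thickness here are arbitrary, not taken from the original source):

void DrawLine( cv::Mat& rImg, const Joint& rJ1, const Joint& rJ2, ICoordinateMapper* pMapper )
{
    // skip joints the sensor could not track at all
    if( rJ1.TrackingState == TrackingState_NotTracked || rJ2.TrackingState == TrackingState_NotTracked ){
        return;
    }
    // project both camera-space joints into color-image coordinates
    ColorSpacePoint p1, p2;
    pMapper->MapCameraPointToColorSpace( rJ1.Position, &p1 );
    pMapper->MapCameraPointToColorSpace( rJ2.Position, &p2 );
    cv::line( rImg,
              cv::Point( static_cast<int>( p1.X ), static_cast<int>( p1.Y ) ),
              cv::Point( static_cast<int>( p2.X ), static_cast<int>( p2.Y ) ),
              cv::Scalar( 0, 0, 255 ), 3 );
}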
/* bool MyKinect::Process_AudioFrame(IMultiSourceFrame * pMultiSourceFrame, bool *returnbool) { if (!pMultiSourceFrame) { *returnbool = false; return false; } *returnbool = false; IDepthFrame * pDepthFrame = NULL; IDepthFrameReference *pDepthFrameReference = NULL; HRESULT hr = pMultiSourceFrame->get_(&pDepthFrameReference); if (SUCCEEDED(hr)) { hr = pDepthFrameReference->AcquireFrame(&pDepthFrame); } SafeRelease(pDepthFrameReference); if (SUCCEEDED(hr)) { IFrameDescription * pFrameDescription = NULL; int nWidth = 0; int nHeight = 0; // USHORT nDepthMinReliableDistance = 0; //USHORT nDepthMaxDistance = 0; UINT nBufferSize = 0; UINT16 *pBuffer = NULL; if (SUCCEEDED(hr)) { hr = pDepthFrame->get_FrameDescription(&pFrameDescription); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Width(&nWidth); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Height(&nHeight); } if (SUCCEEDED(hr)) { hr = pDepthFrame->get_DepthMinReliableDistance(&cDepthMinReliableDistance); } if (SUCCEEDED(hr)) { hr = pDepthFrame->get_DepthMaxReliableDistance(&cDepthMaxDistance); } if (SUCCEEDED(hr)) {// this only retrieves the underlying buffer pointer; no copy is made hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);// the size returned here is in UINT16 elements, while memcpy counts bytes } if (SUCCEEDED(hr)) { //int tempsize = cDepthHeight*cDepthWidth *sizeof(USHORT); memcpy(m_pDepthBuffer, pBuffer, nBufferSize*sizeof(USHORT)); *returnbool = true; //ProcessDepth(pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance); } SafeRelease(pFrameDescription);// both the Description and the Frame must be released } SafeRelease(pDepthFrame); return *returnbool; }*/ bool MyKinect::Process_DepthFrame(IMultiSourceFrame * pMultiSourceFrame, bool *returnbool) { if (!pMultiSourceFrame) { *returnbool = false; return false; } *returnbool = false; IDepthFrame * pDepthFrame = NULL; IDepthFrameReference *pDepthFrameReference = NULL; HRESULT hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference); if (SUCCEEDED(hr)) { hr = pDepthFrameReference->AcquireFrame(&pDepthFrame); } SafeRelease(pDepthFrameReference); /*if (!SUCCEEDED(hr)) { cout << " depth frame lost " << ++depthLostFrames << endl; }*/ if (SUCCEEDED(hr)) { IFrameDescription * pFrameDescription = NULL; int nWidth = 0; int nHeight = 0; // USHORT nDepthMinReliableDistance = 0; //USHORT nDepthMaxDistance = 0; UINT nBufferSize = 0; UINT16 *pBuffer = NULL; if (SUCCEEDED(hr)) { hr = pDepthFrame->get_FrameDescription(&pFrameDescription); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Width(&nWidth); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Height(&nHeight); } if (SUCCEEDED(hr)) { hr = pDepthFrame->get_DepthMinReliableDistance(&cDepthMinReliableDistance); } if (SUCCEEDED(hr)) { hr = pDepthFrame->get_DepthMaxReliableDistance(&cDepthMaxDistance); } if (SUCCEEDED(hr)) {// this only retrieves the underlying buffer pointer; no copy is made hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);// the size returned here is in UINT16 elements, while memcpy counts bytes } if (SUCCEEDED(hr)) { //int tempsize = cDepthHeight*cDepthWidth *sizeof(USHORT); memcpy(m_pDepthBuffer, pBuffer, nBufferSize*sizeof(USHORT)); *returnbool = true; //ProcessDepth(pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance); } SafeRelease(pFrameDescription);// both the Description and the Frame must be released } /* else if (colorDepthFramesynchronization)// the depth frame is missing but the color frame was processed, so the previous frame would have to be used {// not needed: the previous frame's coordinates were already used, so this would be redundant } */ SafeRelease(pDepthFrame); return *returnbool; }
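SafeRelease() is used throughout these listings but never shown. It is normally the standard COM release-and-null helper; a sketch of that common definition:

template<class Interface>
inline void SafeRelease( Interface*& pInterfaceToRelease )
{
    if( pInterfaceToRelease != nullptr ){
        pInterfaceToRelease->Release();
        pInterfaceToRelease = nullptr;
    }
}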
void UpdateColor() { if (!m_pColorFrameReader) { return; } IColorFrame* pColorFrame = NULL; HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame); if (SUCCEEDED(hr)) { INT64 nTime = 0; IFrameDescription* pFrameDescription = NULL; int nWidth = 0; int nHeight = 0; ColorImageFormat imageFormat = ColorImageFormat_None; UINT nBufferSize = 0; RGBQUAD *pBuffer = NULL; hr = pColorFrame->get_RelativeTime(&nTime); if (SUCCEEDED(hr)) { hr = pColorFrame->get_FrameDescription(&pFrameDescription); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Width(&nWidth); } if (SUCCEEDED(hr)) { m_nColorWidth = nWidth; hr = pFrameDescription->get_Height(&nHeight); } if (SUCCEEDED(hr)) { m_nColorHeight = nHeight; hr = pColorFrame->get_RawColorImageFormat(&imageFormat); } if (SUCCEEDED(hr)) { if (imageFormat == ColorImageFormat_Bgra) { hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer)); } else if (m_pColorRGBX) { pBuffer = m_pColorRGBX; nBufferSize = nWidth * nHeight * sizeof(RGBQUAD); hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra); } else { hr = E_FAIL; } } if (SUCCEEDED(hr)) { { ProcessColor(nTime, pBuffer, nWidth, nHeight); } } SafeRelease(pFrameDescription); } else{ DumpHR(hr); } SafeRelease(pColorFrame); }
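DumpHR() is not defined in this excerpt; presumably it just logs the failing HRESULT (AcquireLatestFrame commonly returns E_PENDING when no new frame is ready yet). A sketch of that assumption:

#include <iostream>
#include <iomanip>

void DumpHR( HRESULT hr )
{
    // E_PENDING simply means no new color frame has arrived since the last call
    std::cerr << "AcquireLatestFrame failed, HRESULT = 0x"
              << std::hex << std::setw( 8 ) << std::setfill( '0' )
              << static_cast<unsigned long>( hr ) << std::dec << std::endl;
}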
/// <summary> /// Main processing function /// </summary> void CCoordinateMappingBasics::Update() { if (!m_pMultiSourceFrameReader) { return; } IMultiSourceFrame* pMultiSourceFrame = NULL; IDepthFrame* pDepthFrame = NULL; IColorFrame* pColorFrame = NULL; IBodyIndexFrame* pBodyIndexFrame = NULL; IBodyFrame* pBodyFrame = NULL; HRESULT hr = m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame); if (SUCCEEDED(hr)) { IDepthFrameReference* pDepthFrameReference = NULL; hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference); if (SUCCEEDED(hr)) { hr = pDepthFrameReference->AcquireFrame(&pDepthFrame); } SafeRelease(pDepthFrameReference); } if (SUCCEEDED(hr)) { IColorFrameReference* pColorFrameReference = NULL; hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference); if (SUCCEEDED(hr)) { hr = pColorFrameReference->AcquireFrame(&pColorFrame); } SafeRelease(pColorFrameReference); } if (SUCCEEDED(hr)) { IBodyIndexFrameReference* pBodyIndexFrameReference = NULL; hr = pMultiSourceFrame->get_BodyIndexFrameReference(&pBodyIndexFrameReference); if (SUCCEEDED(hr)) { hr = pBodyIndexFrameReference->AcquireFrame(&pBodyIndexFrame); } SafeRelease(pBodyIndexFrameReference); } if (SUCCEEDED(hr)) { IBodyFrameReference* pBodyFrameReference = NULL; hr = pMultiSourceFrame->get_BodyFrameReference(&pBodyFrameReference); if (SUCCEEDED(hr)) { hr = pBodyFrameReference->AcquireFrame(&pBodyFrame); } SafeRelease(pBodyFrameReference); } if (SUCCEEDED(hr)) { // Depth INT64 nDepthTime = 0; IFrameDescription* pDepthFrameDescription = NULL; int nDepthWidth = 0; int nDepthHeight = 0; UINT nDepthBufferSize = 0; UINT16 *pDepthBuffer = NULL; // Color IFrameDescription* pColorFrameDescription = NULL; int nColorWidth = 0; int nColorHeight = 0; ColorImageFormat imageFormat = ColorImageFormat_None; UINT nColorBufferSize = 0; RGBQUAD *pColorBuffer = NULL; // BodyIndex IFrameDescription* pBodyIndexFrameDescription = NULL; int nBodyIndexWidth = 0; int nBodyIndexHeight = 0; UINT nBodyIndexBufferSize = 0; BYTE *pBodyIndexBuffer = NULL; // Body IBody* ppBodies[BODY_COUNT] = { 0 }; // get depth frame data hr = pDepthFrame->get_RelativeTime(&nDepthTime); // Depth if (SUCCEEDED(hr)) { hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription); } if (SUCCEEDED(hr)) { hr = pDepthFrameDescription->get_Width(&nDepthWidth); } if (SUCCEEDED(hr)) { hr = pDepthFrameDescription->get_Height(&nDepthHeight); } if (SUCCEEDED(hr)) { hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer); } // get color frame data if (SUCCEEDED(hr)) { hr = pColorFrame->get_FrameDescription(&pColorFrameDescription); } if (SUCCEEDED(hr)) { hr = pColorFrameDescription->get_Width(&nColorWidth); } if (SUCCEEDED(hr)) { hr = pColorFrameDescription->get_Height(&nColorHeight); } if (SUCCEEDED(hr)) { hr = pColorFrame->get_RawColorImageFormat(&imageFormat); } if (SUCCEEDED(hr)) { if (imageFormat == ColorImageFormat_Bgra) { hr = pColorFrame->AccessRawUnderlyingBuffer(&nColorBufferSize, reinterpret_cast<BYTE**>(&pColorBuffer)); } else if (m_pColorRGBX) { pColorBuffer = m_pColorRGBX; nColorBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD); hr = pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, reinterpret_cast<BYTE*>(pColorBuffer), ColorImageFormat_Bgra); } else { hr = E_FAIL; } } // get body index frame data if (SUCCEEDED(hr)) { hr = pBodyIndexFrame->get_FrameDescription(&pBodyIndexFrameDescription); } if (SUCCEEDED(hr)) { hr = pBodyIndexFrameDescription->get_Width(&nBodyIndexWidth); } if (SUCCEEDED(hr)) { 
hr = pBodyIndexFrameDescription->get_Height(&nBodyIndexHeight); } if (SUCCEEDED(hr)) { hr = pBodyIndexFrame->AccessUnderlyingBuffer(&nBodyIndexBufferSize, &pBodyIndexBuffer); } // get body frame data if (SUCCEEDED(hr)) { hr = pBodyFrame->GetAndRefreshBodyData(_countof(ppBodies), ppBodies); } if (SUCCEEDED(hr)) { ProcessFrame(nDepthTime, pDepthBuffer, nDepthWidth, nDepthHeight, pColorBuffer, nColorWidth, nColorHeight, pBodyIndexBuffer, nBodyIndexWidth, nBodyIndexHeight, BODY_COUNT, ppBodies); } SafeRelease(pDepthFrameDescription); SafeRelease(pColorFrameDescription); SafeRelease(pBodyIndexFrameDescription); for (int i = 0; i < _countof(ppBodies); ++i) { SafeRelease(ppBodies[i]); } } SafeRelease(pDepthFrame); SafeRelease(pColorFrame); SafeRelease(pBodyIndexFrame); SafeRelease(pBodyFrame); SafeRelease(pMultiSourceFrame); }
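ProcessFrame() itself is not included here. In the SDK's CoordinateMappingBasics sample it performs a green-screen composite: every color pixel is mapped into depth space and kept only if the body index frame marks that depth pixel as belonging to a player. A sketch of that core step, reusing the locals from Update() above and assuming a member m_pCoordinateMapper, a DepthSpacePoint array m_pDepthCoordinates with one entry per color pixel, and an RGBQUAD output buffer pOutputRGBX (all hypothetical names):

#include <limits>  // for the -infinity sentinel returned for unmappable pixels

HRESULT hr = m_pCoordinateMapper->MapColorFrameToDepthSpace(
    nDepthWidth * nDepthHeight, pDepthBuffer,            // source depth frame
    nColorWidth * nColorHeight, m_pDepthCoordinates );   // one DepthSpacePoint per color pixel
if ( SUCCEEDED( hr ) )
{
    for ( int colorIndex = 0; colorIndex < nColorWidth * nColorHeight; ++colorIndex )
    {
        bool onPlayer = false;
        DepthSpacePoint p = m_pDepthCoordinates[colorIndex];
        // color pixels with no depth correspondence come back as -infinity
        if ( p.X != -std::numeric_limits<float>::infinity() && p.Y != -std::numeric_limits<float>::infinity() )
        {
            int depthX = static_cast<int>( p.X + 0.5f );
            int depthY = static_cast<int>( p.Y + 0.5f );
            if ( depthX >= 0 && depthX < nDepthWidth && depthY >= 0 && depthY < nDepthHeight )
            {
                // 0xff in the body index frame means "no player at this depth pixel"
                onPlayer = ( pBodyIndexBuffer[depthX + depthY * nDepthWidth] != 0xff );
            }
        }
        // keep player pixels, blank the background
        pOutputRGBX[colorIndex] = onPlayer ? pColorBuffer[colorIndex] : RGBQUAD();
    }
}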
void MyKinect2::Update() { // retrieve the 2D color image if (!m_pColorFrameReader) { return; } IColorFrame* pColorFrame = NULL; HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame); if (SUCCEEDED(hr)) { INT64 nTime = 0; IFrameDescription* pFrameDescription = NULL; int nWidth = 0; int nHeight = 0; ColorImageFormat imageFormat = ColorImageFormat_None; UINT nBufferSize = 0; hr = pColorFrame->get_RelativeTime(&nTime); if (SUCCEEDED(hr)) { hr = pColorFrame->get_FrameDescription(&pFrameDescription); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Width(&nWidth); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Height(&nHeight); } if (SUCCEEDED(hr)) { hr = pColorFrame->get_RawColorImageFormat(&imageFormat); } if (SUCCEEDED(hr) && (nWidth == cColorWidth) && (nHeight == cColorHeight)) { if (imageFormat == ColorImageFormat_Bgra) { hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&webcam.data)); } else if (m_pColorRGBX) { nBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD); hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(webcam.data), ColorImageFormat_Bgra); } else { hr = E_FAIL; } } SafeRelease(pFrameDescription); } // retrieve the skeleton if (!m_pBodyFrameReader) { return; } IBodyFrame* pBodyFrame = NULL; hr = m_pBodyFrameReader->AcquireLatestFrame(&pBodyFrame); if (SUCCEEDED(hr)) { INT64 nTime = 0; hr = pBodyFrame->get_RelativeTime(&nTime); IBody* ppBodies[BODY_COUNT] = {0}; if (SUCCEEDED(hr)) { hr = pBodyFrame->GetAndRefreshBodyData(_countof(ppBodies), ppBodies); } if (SUCCEEDED(hr)) { ProcessBody(BODY_COUNT, ppBodies); // BODY_COUNT is a #define from Kinect.h equal to 6... maybe set it to 1 later to avoid problems when retrieving joint positions } for (int i = 0; i < _countof(ppBodies); ++i) { SafeRelease(ppBodies[i]); } } SafeRelease(pBodyFrame); SafeRelease(pColorFrame); }
int main(int argc, char* argv[]) { int similarity = 0; string line; ifstream myfile ("source_config.txt"); if (myfile.is_open()) { getline (myfile,line); hauteurCamera = stoi(line); getline (myfile,line); fountainXPosition = stoi(line); getline (myfile,line); fountainYPosition = stoi(line); getline (myfile,line); fountainWidth = stoi(line); getline (myfile,line); blasterWidth = stoi(line); getline (myfile,line); int numberOfBlaster = stoi(line); for(int i = 0;i<numberOfBlaster;i++){ getline (myfile,line); blasterXPosition.push_back(stoi(line)); getline (myfile,line); blasterYPosition.push_back(stoi(line)); } myfile.close(); } else { cout << "Unable to open file"; exit(-1); } IKinectSensor* m_pKinectSensor; IDepthFrameReader* pDepthReader; IDepthFrameSource* pDepthFrameSource = NULL; // Depth image IColorFrameReader* pColorReader; IColorFrameSource* pColorFrameSource = NULL; HRESULT hr; hr = GetDefaultKinectSensor(&m_pKinectSensor); if (FAILED(hr)){ cout << "ColorTrackerv2 Error : GetDefaultKinectSensor failed." << endl; return false; } hr = m_pKinectSensor->Open(); if (FAILED(hr)){ cout << "ColorTrackerv2 Error : Open failed." << endl; return false; } hr = m_pKinectSensor->get_DepthFrameSource( &pDepthFrameSource ); if (FAILED(hr)){ cout << "ColorTrackerv2 Error : get_DepthFrameSource failed." << endl; return false; } hr = pDepthFrameSource->OpenReader( &pDepthReader ); if (FAILED(hr)){ cout << "ColorTrackerv2 Error : OpenReader failed." << endl; return false; } IFrameDescription* pDepthDescription; hr = pDepthFrameSource->get_FrameDescription( &pDepthDescription ); if( FAILED( hr ) ){ std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl; return -1; } w = 0,h = 0; pDepthDescription->get_Width( &w ); // 512 pDepthDescription->get_Height( &h ); // 424 //unsigned int irBufferSize = w * h * sizeof( unsigned short ); hr = m_pKinectSensor->get_ColorFrameSource( &pColorFrameSource ); if (FAILED(hr)){ cout << "ColorTrackerv2 Error : get_ColorFrameSource failed." << endl; return false; } hr = pColorFrameSource->OpenReader( &pColorReader ); if (FAILED(hr)){ cout << "ColorTrackerv2 Error : OpenReader failed." 
<< endl; return false; } // Description IFrameDescription* pColorDescription; hr = pColorFrameSource->get_FrameDescription( &pColorDescription ); if( FAILED( hr ) ){ std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl; return -1; } int colorWidth = 0; int colorHeight = 0; pColorDescription->get_Width( &colorWidth ); // 1920 pColorDescription->get_Height( &colorHeight ); // 1080 unsigned int colorBufferSize = colorWidth * colorHeight * 4 * sizeof( unsigned char ); Mat colorBufferMat = Mat::zeros( cvSize(colorWidth,colorHeight), CV_8UC4 ); //colorMat = Mat::zeros( cvSize(colorWidth/2,colorHeight/2), CV_8UC4 ); ICoordinateMapper* pCoordinateMapper; hr = m_pKinectSensor->get_CoordinateMapper( &pCoordinateMapper ); // open an OpenCV window cv::namedWindow("source_config", CV_WINDOW_AUTOSIZE ); setMouseCallback("source_config", MouseCallBackFunc, NULL); Mat frame(h,w, CV_8UC3, Scalar(255,255,255)); Mat display; //Mat img; char k = 0; while(k!=27){ HRESULT hResult = S_OK; if(displayColor){ IColorFrame* pColorFrame = nullptr; hResult = pColorReader->AcquireLatestFrame( &pColorFrame ); while(!SUCCEEDED(hResult)){ Sleep(50); hResult = pColorReader->AcquireLatestFrame(&pColorFrame); } if( SUCCEEDED( hResult ) ){ hResult = pColorFrame->CopyConvertedFrameDataToArray( colorBufferSize, reinterpret_cast<BYTE*>( colorBufferMat.data ), ColorImageFormat::ColorImageFormat_Bgra ); if( !SUCCEEDED( hResult ) ){ return false; } resize(colorBufferMat,display,Size(displaySize*w,displaySize*h)); flip(display,display,1); cv::line(display,Point(displaySize*w/2,0),Point(displaySize*w/2,displaySize*h),Scalar(0,0,255),2); cv::line(display,Point(0,displaySize*h/2),Point(displaySize*w,displaySize*h/2),Scalar(0,0,255),2); if (pColorFrame ) { pColorFrame ->Release(); pColorFrame = NULL; } } else return false; } else { IDepthFrame* pDepthFrame = nullptr; hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame ); while(!SUCCEEDED(hResult)){ Sleep(10); hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame ); } if( SUCCEEDED( hResult ) ){ unsigned int bufferSize = 0; unsigned short* buffer = nullptr; hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, &buffer ); if( SUCCEEDED( hResult ) ){ for( int y = 0; y < h; y++ ){ for( int x = 0; x < w; x++ ){ Vec3b intensity = frame.at<Vec3b>(y, x); if(buffer[ y * w + (w - x - 1) ] < hauteurCamera){ int d = buffer[ y * w + (w - x - 1) ]; intensity.val[0] = 2.55*(d % 100); intensity.val[1] = 1.22*(d % 200); intensity.val[2] = 256.0*d/hauteurCamera; } else { intensity.val[0] = 255; intensity.val[1] = 255; intensity.val[2] = 255; } /*intensity.val[0] = buffer[ y * w + x ] >> 8; intensity.val[1] = buffer[ y * w + x ] >> 8; intensity.val[2] = buffer[ y * w + x ] >> 8;*/ frame.at<Vec3b>(y, x) = intensity; } } // change the rectangle color depending on whether the corner heights are similar or not (less than 4 cm apart) float d1 = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))]; float d2 = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))]; float d3 = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))]; float d4 = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))]; if((d1 < 0)||(d1>3500)||(d2 < 0)||(d2>3500)||(d3 < 0)||(d3>3500)||(d4 < 0)||(d4>3500)){ similarity = 0; } else { int mn = 100; if((abs(d1-d2) < mn) &&(abs(d1-d3) < mn) &&(abs(d1-d4) < mn)
&&(abs(d2-d3) < mn) &&(abs(d2-d4) < mn) &&(abs(d3-d4) < mn)) similarity = 255; else{ int md = abs(d1-d2); md = MAX(md,abs(d1-d3)); md = MAX(md,abs(d1-d4)); md = MAX(md,abs(d2-d3)); md = MAX(md,abs(d2-d4)); md = MAX(md,abs(d3-d4)); if(md-mn>128) similarity = 0; else similarity = 128 - (md - mn); } } if(k=='s'){ // get the camera height // from the depth frame, get the 3D positions of the 1.20 m jets and save them to another file to be loaded by an Observer CameraSpacePoint cameraPoint = { 0 }; DepthSpacePoint depthPoint = { 0 }; UINT16 depth; // Compute the camera height as the average of four points float h = 0; int cptH = 0; float d; d = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))]; if((d>500)&&(d<3500)){h+=d; cptH++;} d = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))]; if((d>500)&&(d<3500)){h+=d; cptH++;} d = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))]; if((d>500)&&(d<3500)){h+=d; cptH++;} d = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))]; if((d>500)&&(d<3500)){h+=d; cptH++;} if(cptH>0){ //hauteurCamera = h/cptH; cout << "H = " << hauteurCamera << endl; } // Compute real size of blaster /*depthPoint.X = static_cast<float>(blasterXPosition[0] - blasterWidth); depthPoint.Y = static_cast<float>(blasterYPosition[0] - blasterWidth); depth = hauteurCamera - 1000.0f; // TODO change pCoordinateMapper->MapDepthPointToCameraSpace(depthPoint,depth,&cameraPoint); float corner1X = static_cast<float>(cameraPoint.X); float corner1Y = static_cast<float>(cameraPoint.Y); depthPoint.X = static_cast<float>(blasterXPosition[0] + blasterWidth); depthPoint.Y = static_cast<float>(blasterYPosition[0] + blasterWidth); pCoordinateMapper->MapDepthPointToCameraSpace(depthPoint,depth,&cameraPoint); float corner2X = static_cast<float>(cameraPoint.X); float corner2Y = static_cast<float>(cameraPoint.Y);*/ //float realBlasterWidth = 1000.0*(abs(corner2X-corner1X)+abs(corner2Y-corner1Y))/2.0; ofstream myfile; myfile.open ("source_config.txt"); myfile << hauteurCamera << "\n"; myfile << fountainXPosition << "\n"; myfile << fountainYPosition << "\n"; myfile << fountainWidth << "\n"; myfile << blasterWidth << "\n"; myfile << blasterXPosition.size() << "\n"; for(int i = 0;i<blasterXPosition.size();i++){ myfile << blasterXPosition[i] << "\n"; myfile << blasterYPosition[i] << "\n"; } myfile.close(); // save real positions to file myfile.open ("source3D.txt"); myfile << hauteurCamera << "\n"; myfile << blasterWidth << "\n"; myfile << blasterXPosition.size() << "\n"; for(int i = 0;i<blasterXPosition.size();i++){ depthPoint.X = static_cast<float>(blasterXPosition[i]); depthPoint.Y = static_cast<float>(blasterYPosition[i]); depth = hauteurCamera; pCoordinateMapper->MapDepthPointToCameraSpace(depthPoint,depth,&cameraPoint); cout << depthPoint.X << " - " << depthPoint.Y << endl; cout << static_cast<float>(cameraPoint.X) << " - " << static_cast<float>(cameraPoint.Y) << endl; pCoordinateMapper->MapCameraPointToDepthSpace(cameraPoint, &depthPoint); cout << depthPoint.X << " - " << depthPoint.Y << endl; cout << static_cast<float>(cameraPoint.X) << " - " << static_cast<float>(cameraPoint.Y) << endl; myfile << 1000.0*static_cast<float>(cameraPoint.X) << "\n"; myfile << 1000.0*static_cast<float>(cameraPoint.Y) << "\n"; } myfile.close(); } } else{ return false; } } else return false; if( pDepthFrame != NULL ){
pDepthFrame->Release(); pDepthFrame = NULL; } rectangle(frame, Rect(fountainXPosition-fountainWidth/2.0, fountainYPosition-fountainWidth/2.0, fountainWidth , fountainWidth), Scalar(0,similarity,255-similarity), 3); for(int i = -2; i <= 2; i++) { rectangle(frame, Rect(fountainXPosition - (i*blasterWidth) - (blasterWidth/2.0), fountainYPosition-fountainWidth/2.0, blasterWidth, fountainWidth), Scalar(0,255,0), 1); rectangle(frame, Rect(fountainXPosition-fountainWidth/2.0, fountainYPosition - (i*blasterWidth) - (blasterWidth/2.0), fountainWidth, blasterWidth), Scalar(0,255,0), 1); } char textbuffer [33]; for(int i = 0;i<blasterXPosition.size();i++){ sprintf(textbuffer,"%i",i+1); putText(frame,textbuffer, Point2f(blasterXPosition[i]-blasterWidth/2.0,blasterYPosition[i]), FONT_HERSHEY_PLAIN, 1, Scalar(0,0,255,255), 2); //rectangle(frame,Rect(blasterXPosition[i]-blasterWidth/2.0,blasterYPosition[i]-blasterWidth/2.0,blasterWidth,blasterWidth),Scalar(255,0,0)); circle(frame,Point(blasterXPosition[i],blasterYPosition[i]),blasterWidth/2.0,Scalar(255,0,0)); } resize(frame,display,Size(displaySize*w,displaySize*h)); } cv::imshow("source_config", display); k=cv::waitKey(1); if(k == 32) // Space displayColor = ! displayColor; if(k=='a') alignBuseFromLayout(); if(k=='r') resetFountainPosition(); } if (pDepthReader) { pDepthReader->Release(); pDepthReader = NULL; } if (pCoordinateMapper) { pCoordinateMapper->Release(); pCoordinateMapper = NULL; } m_pKinectSensor->Close(); if (m_pKinectSensor) { m_pKinectSensor->Release(); m_pKinectSensor = NULL; } //system("pause"); return 0; }
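MouseCallBackFunc(), alignBuseFromLayout() and resetFountainPosition() are referenced above but not included in this excerpt. As a purely hypothetical sketch, the mouse callback might simply re-centre the fountain on a left click, converting from the scaled display window back to depth coordinates via displaySize:

void MouseCallBackFunc( int event, int x, int y, int flags, void* userdata )
{
    if( event == CV_EVENT_LBUTTONDOWN ){
        // the window shows the scaled depth/color image, so undo the display scaling
        fountainXPosition = (int)( x / displaySize );
        fountainYPosition = (int)( y / displaySize );
    }
}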
int main(int argc, char** argv) { // 1a. Get default Sensor cout << "Try to get default sensor" << endl; IKinectSensor* pSensor = nullptr; if (GetDefaultKinectSensor(&pSensor) != S_OK) { cerr << "Get Sensor failed" << endl; } else { // 1b. Open sensor cout << "Try to open sensor" << endl; if (pSensor->Open() != S_OK) { cerr << "Can't open sensor" << endl; } else { // 2a. Get frame source cout << "Try to get source" << endl; IDepthFrameSource* pFrameSource = nullptr; if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK) { cerr << "Can't get frame source" << endl; } else { // 2b. Get frame description int iWidth = 0; int iHeight = 0; IFrameDescription* pFrameDescription = nullptr; if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK) { pFrameDescription->get_Width(&iWidth); pFrameDescription->get_Height(&iHeight); pFrameDescription->Release(); pFrameDescription = nullptr; } // 2c. get some depth-only metadata UINT16 uDepthMin = 0, uDepthMax = 0; pFrameSource->get_DepthMinReliableDistance(&uDepthMin); pFrameSource->get_DepthMaxReliableDistance(&uDepthMax); cout << "Reliable Distance: " << uDepthMin << " - " << uDepthMax << endl; // prepare OpenCV cv::Mat mDepthImg(iHeight, iWidth, CV_16UC1); cv::Mat mImg8bit(iHeight, iWidth, CV_8UC1); cv::namedWindow( "Depth Map" ); // 3a. get frame reader cout << "Try to get frame reader" << endl; IDepthFrameReader* pFrameReader = nullptr; if (pFrameSource->OpenReader(&pFrameReader) != S_OK) { cerr << "Can't get frame reader" << endl; } else { // Enter main loop cout << "Enter main loop" << endl; while (true) { // 4a. Get last frame IDepthFrame* pFrame = nullptr; if (pFrameReader->AcquireLatestFrame(&pFrame) == S_OK) { // 4c. copy the depth map to image if (pFrame->CopyFrameDataToArray(iWidth * iHeight, reinterpret_cast<UINT16*>(mDepthImg.data)) == S_OK) { // 4d. convert from 16bit to 8bit mDepthImg.convertTo(mImg8bit, CV_8U, 255.0f / uDepthMax); cv::imshow("Depth Map", mImg8bit); } else { cerr << "Data copy error" << endl; } // 4e. release frame pFrame->Release(); } // 4f. check keyboard input if (cv::waitKey(30) == VK_ESCAPE){ break; } } // 3b. release frame reader cout << "Release frame reader" << endl; pFrameReader->Release(); pFrameReader = nullptr; } // 2d. release Frame source cout << "Release frame source" << endl; pFrameSource->Release(); pFrameSource = nullptr; } // 1c. Close Sensor cout << "close sensor" << endl; pSensor->Close(); } // 1d. Release Sensor cout << "Release sensor" << endl; pSensor->Release(); pSensor = nullptr; } return 0; }
int _tmain( int argc, _TCHAR* argv[] ) { cv::setUseOptimized( true ); // Sensor IKinectSensor* pSensor; HRESULT hResult = S_OK; hResult = GetDefaultKinectSensor( &pSensor ); if( FAILED( hResult ) ){ std::cerr << "Error : GetDefaultKinectSensor" << std::endl; return -1; } hResult = pSensor->Open(); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::Open()" << std::endl; return -1; } // Source IColorFrameSource* pColorSource; hResult = pSensor->get_ColorFrameSource( &pColorSource ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl; return -1; } // Reader IColorFrameReader* pColorReader; hResult = pColorSource->OpenReader( &pColorReader ); if( FAILED( hResult ) ){ std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl; return -1; } // Description IFrameDescription* pDescription; hResult = pColorSource->get_FrameDescription( &pDescription ); if( FAILED( hResult ) ){ std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl; return -1; } int width = 0; int height = 0; pDescription->get_Width( &width ); // 1920 pDescription->get_Height( &height ); // 1080 unsigned int bufferSize = width * height * 4 * sizeof( unsigned char ); cv::Mat bufferMat( height, width, CV_8UC4 ); cv::Mat colorMat( height / 2, width / 2, CV_8UC4 ); cv::namedWindow( "Color" ); // Subscribe Handle WAITABLE_HANDLE hColorWaitable; hResult = pColorReader->SubscribeFrameArrived( &hColorWaitable ); if( FAILED( hResult ) ){ std::cerr << "Error : IColorFrameReader::SubscribeFrameArrived()" << std::endl; return -1; } while( 1 ){ // Waitable Events HANDLE hEvents[ ] = { reinterpret_cast<HANDLE>( hColorWaitable ) }; WaitForMultipleObjects( ARRAYSIZE( hEvents ), hEvents, true, INFINITE ); // Arrived Data IColorFrameArrivedEventArgs* pColorArgs = nullptr; hResult = pColorReader->GetFrameArrivedEventData( hColorWaitable, &pColorArgs ); if( SUCCEEDED( hResult ) ){ // Reference IColorFrameReference* pColorReference = nullptr; hResult = pColorArgs->get_FrameReference( &pColorReference ); if( SUCCEEDED( hResult ) ){ // Frame IColorFrame* pColorFrame = nullptr; hResult = pColorReference->AcquireFrame( &pColorFrame ); if( SUCCEEDED( hResult ) ){ hResult = pColorFrame->CopyConvertedFrameDataToArray( bufferSize, reinterpret_cast<BYTE*>( bufferMat.data ), ColorImageFormat::ColorImageFormat_Bgra ); if( SUCCEEDED( hResult ) ){ cv::resize( bufferMat, colorMat, cv::Size(), 0.5, 0.5 ); } } SafeRelease( pColorFrame ); } SafeRelease( pColorReference ); } SafeRelease( pColorArgs ); cv::imshow( "Color", colorMat ); if( cv::waitKey( 30 ) == VK_ESCAPE ){ break; } } SafeRelease( pColorSource ); SafeRelease( pColorReader, hColorWaitable ); SafeRelease( pDescription ); if( pSensor ){ pSensor->Close(); } SafeRelease( pSensor ); cv::destroyAllWindows(); return 0; }
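The two-argument SafeRelease( pColorReader, hColorWaitable ) call above is not defined in this excerpt; presumably it unsubscribes the waitable handle before releasing the reader. A sketch of that assumed overload:

inline void SafeRelease( IColorFrameReader*& pReader, WAITABLE_HANDLE& hWaitable )
{
    if( pReader != nullptr ){
        // stop event delivery before the reader goes away
        pReader->UnsubscribeFrameArrived( hWaitable );
        pReader->Release();
        pReader = nullptr;
        hWaitable = 0;
    }
}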
int main(int argc, char** argv) { // 1a. Get default Sensor cout << "Try to get default sensor" << endl; IKinectSensor* pSensor = nullptr; if (GetDefaultKinectSensor(&pSensor) != S_OK) { cerr << "Get Sensor failed" << endl; return -1; } // 1b. Open sensor cout << "Try to open sensor" << endl; if (pSensor->Open() != S_OK) { cerr << "Can't open sensor" << endl; return -1; } // 2a. Get frame source cout << "Try to get body index source" << endl; IBodyIndexFrameSource* pFrameSource = nullptr; if (pSensor->get_BodyIndexFrameSource(&pFrameSource) != S_OK) { cerr << "Can't get body index frame source" << endl; return -1; } // 2b. Get frame description cout << "get body index frame description" << endl; int iWidth = 0; int iHeight = 0; IFrameDescription* pFrameDescription = nullptr; if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK) { pFrameDescription->get_Width(&iWidth); pFrameDescription->get_Height(&iHeight); } pFrameDescription->Release(); pFrameDescription = nullptr; // 3a. get frame reader cout << "Try to get body index frame reader" << endl; IBodyIndexFrameReader* pFrameReader = nullptr; if (pFrameSource->OpenReader(&pFrameReader) != S_OK) { cerr << "Can't get body index frame reader" << endl; return -1; } // 2c. release Frame source cout << "Release frame source" << endl; pFrameSource->Release(); pFrameSource = nullptr; // Prepare OpenCV data cv::Mat mImg(iHeight, iWidth, CV_8UC3); cv::namedWindow("Body Index Image"); // color array cv::Vec3b aColorTable[7] = { cv::Vec3b(255,0,0), cv::Vec3b(0,255,0), cv::Vec3b(0,0,255), cv::Vec3b(255,255,0), cv::Vec3b(255,0,255), cv::Vec3b(0,255,255), cv::Vec3b(0,0,0), }; // Enter main loop while (true) { // 4a. Get last frame IBodyIndexFrame* pFrame = nullptr; if (pFrameReader->AcquireLatestFrame(&pFrame) == S_OK) { // 4c. Fill OpenCV image UINT uSize = 0; BYTE* pBuffer = nullptr; if (pFrame->AccessUnderlyingBuffer(&uSize,&pBuffer) == S_OK) { for (int y = 0; y < iHeight; ++y) { for (int x = 0; x < iWidth; ++x) { int uBodyIdx = pBuffer[x + y * iWidth]; if (uBodyIdx < 6) mImg.at<cv::Vec3b>(y, x) = aColorTable[uBodyIdx]; else mImg.at<cv::Vec3b>(y, x) = aColorTable[6]; } } cv::imshow("Body Index Image", mImg); } else { cerr << "Data access error" << endl; } // 4e. release frame pFrame->Release(); } // 4f. check keyboard input if (cv::waitKey(30) == VK_ESCAPE){ break; } } // 3b. release frame reader cout << "Release frame reader" << endl; pFrameReader->Release(); pFrameReader = nullptr; // 1c. Close Sensor cout << "close sensor" << endl; pSensor->Close(); // 1d. Release Sensor cout << "Release sensor" << endl; pSensor->Release(); pSensor = nullptr; return 0; }
void Device::update() { if ( mSensor != 0 ) { mSensor->get_Status( &mStatus ); } if ( mFrameReader == 0 ) { return; } IAudioBeamFrame* audioFrame = 0; IBodyFrame* bodyFrame = 0; IBodyIndexFrame* bodyIndexFrame = 0; IColorFrame* colorFrame = 0; IDepthFrame* depthFrame = 0; IMultiSourceFrame* frame = 0; IInfraredFrame* infraredFrame = 0; ILongExposureInfraredFrame* infraredLongExposureFrame = 0; HRESULT hr = mFrameReader->AcquireLatestFrame( &frame ); // TODO audio if ( SUCCEEDED( hr ) ) { console() << "SUCCEEDED " << getElapsedFrames() << endl; } if ( SUCCEEDED( hr ) && mDeviceOptions.isBodyEnabled() ) { IBodyFrameReference* frameRef = 0; hr = frame->get_BodyFrameReference( &frameRef ); if ( SUCCEEDED( hr ) ) { hr = frameRef->AcquireFrame( &bodyFrame ); } if ( frameRef != 0 ) { frameRef->Release(); frameRef = 0; } } if ( SUCCEEDED( hr ) && mDeviceOptions.isBodyIndexEnabled() ) { IBodyIndexFrameReference* frameRef = 0; hr = frame->get_BodyIndexFrameReference( &frameRef ); if ( SUCCEEDED( hr ) ) { hr = frameRef->AcquireFrame( &bodyIndexFrame ); } if ( frameRef != 0 ) { frameRef->Release(); frameRef = 0; } } if ( SUCCEEDED( hr ) && mDeviceOptions.isColorEnabled() ) { IColorFrameReference* frameRef = 0; hr = frame->get_ColorFrameReference( &frameRef ); if ( SUCCEEDED( hr ) ) { hr = frameRef->AcquireFrame( &colorFrame ); } if ( frameRef != 0 ) { frameRef->Release(); frameRef = 0; } } if ( SUCCEEDED( hr ) && mDeviceOptions.isDepthEnabled() ) { IDepthFrameReference* frameRef = 0; hr = frame->get_DepthFrameReference( &frameRef ); if ( SUCCEEDED( hr ) ) { hr = frameRef->AcquireFrame( &depthFrame ); } if ( frameRef != 0 ) { frameRef->Release(); frameRef = 0; } } if ( SUCCEEDED( hr ) && mDeviceOptions.isInfraredEnabled() ) { IInfraredFrameReference* frameRef = 0; hr = frame->get_InfraredFrameReference( &frameRef ); if ( SUCCEEDED( hr ) ) { hr = frameRef->AcquireFrame( &infraredFrame ); } if ( frameRef != 0 ) { frameRef->Release(); frameRef = 0; } } if ( SUCCEEDED( hr ) && mDeviceOptions.isInfraredLongExposureEnabled() ) { ILongExposureInfraredFrameReference* frameRef = 0; hr = frame->get_LongExposureInfraredFrameReference( &frameRef ); if ( SUCCEEDED( hr ) ) { hr = frameRef->AcquireFrame( &infraredLongExposureFrame ); } if ( frameRef != 0 ) { frameRef->Release(); frameRef = 0; } } if ( SUCCEEDED( hr ) ) { long long time = 0L; // TODO audio IFrameDescription* bodyFrameDescription = 0; int32_t bodyWidth = 0; int32_t bodyHeight = 0; uint32_t bodyBufferSize = 0; uint8_t* bodyBuffer = 0; IFrameDescription* bodyIndexFrameDescription = 0; int32_t bodyIndexWidth = 0; int32_t bodyIndexHeight = 0; uint32_t bodyIndexBufferSize = 0; uint8_t* bodyIndexBuffer = 0; IFrameDescription* colorFrameDescription = 0; int32_t colorWidth = 0; int32_t colorHeight = 0; ColorImageFormat imageFormat = ColorImageFormat_None; uint32_t colorBufferSize = 0; uint8_t* colorBuffer = 0; IFrameDescription* depthFrameDescription = 0; int32_t depthWidth = 0; int32_t depthHeight = 0; uint16_t depthMinReliableDistance = 0; uint16_t depthMaxReliableDistance = 0; uint32_t depthBufferSize = 0; uint16_t* depthBuffer = 0; IFrameDescription* infraredFrameDescription = 0; int32_t infraredWidth = 0; int32_t infraredHeight = 0; uint32_t infraredBufferSize = 0; uint16_t* infraredBuffer = 0; IFrameDescription* infraredLongExposureFrameDescription = 0; int32_t infraredLongExposureWidth = 0; int32_t infraredLongExposureHeight = 0; uint32_t infraredLongExposureBufferSize = 0; uint16_t* infraredLongExposureBuffer = 0; hr = depthFrame->get_RelativeTime( 
&time ); // TODO audio if ( mDeviceOptions.isAudioEnabled() ) { } // TODO body if ( mDeviceOptions.isBodyEnabled() ) { } if ( mDeviceOptions.isBodyIndexEnabled() ) { if ( SUCCEEDED( hr ) ) { hr = bodyIndexFrame->get_FrameDescription( &bodyIndexFrameDescription ); } if ( SUCCEEDED( hr ) ) { hr = bodyIndexFrameDescription->get_Width( &bodyIndexWidth ); } if ( SUCCEEDED( hr ) ) { hr = bodyIndexFrameDescription->get_Height( &bodyIndexHeight ); } if ( SUCCEEDED( hr ) ) { //hr = bodyIndexFrame->AccessUnderlyingBuffer( &bodyIndexBufferSize, &bodyIndexBuffer ); } } if ( mDeviceOptions.isColorEnabled() ) { if ( SUCCEEDED( hr ) ) { hr = colorFrame->get_FrameDescription( &colorFrameDescription ); } if ( SUCCEEDED( hr ) ) { hr = colorFrameDescription->get_Width( &colorWidth ); } if ( SUCCEEDED( hr ) ) { hr = colorFrameDescription->get_Height( &colorHeight ); } if ( SUCCEEDED( hr ) ) { hr = colorFrame->get_RawColorImageFormat( &imageFormat ); } if ( SUCCEEDED( hr ) ) { bool isAllocated = false; SurfaceChannelOrder channelOrder = SurfaceChannelOrder::BGRA; if ( imageFormat == ColorImageFormat_Bgra ) { hr = colorFrame->AccessRawUnderlyingBuffer( &colorBufferSize, reinterpret_cast<uint8_t**>( &colorBuffer ) ); channelOrder = SurfaceChannelOrder::BGRA; } else if ( imageFormat == ColorImageFormat_Rgba ) { hr = colorFrame->AccessRawUnderlyingBuffer( &colorBufferSize, reinterpret_cast<uint8_t**>( &colorBuffer ) ); channelOrder = SurfaceChannelOrder::RGBA; } else { isAllocated = true; colorBufferSize = colorWidth * colorHeight * sizeof( uint8_t ) * 4; colorBuffer = new uint8_t[ colorBufferSize ]; hr = colorFrame->CopyConvertedFrameDataToArray( colorBufferSize, reinterpret_cast<uint8_t*>( colorBuffer ), ColorImageFormat_Rgba ); channelOrder = SurfaceChannelOrder::RGBA; } if ( SUCCEEDED( hr ) ) { colorFrame->get_RelativeTime( &time ); Surface8u colorSurface = Surface8u( colorBuffer, colorWidth, colorHeight, colorWidth * sizeof( uint8_t ) * 4, channelOrder ); mFrame.mSurfaceColor = Surface8u( colorWidth, colorHeight, false, channelOrder ); mFrame.mSurfaceColor.copyFrom( colorSurface, colorSurface.getBounds() ); console() << "Color\n\twidth: " << colorWidth << "\n\theight: " << colorHeight << "\n\tbuffer size: " << colorBufferSize << "\n\ttime: " << time << endl; } if ( isAllocated && colorBuffer != 0 ) { delete[] colorBuffer; colorBuffer = 0; } } } if ( mDeviceOptions.isDepthEnabled() ) { if ( SUCCEEDED( hr ) ) { hr = depthFrame->get_FrameDescription( &depthFrameDescription ); } if ( SUCCEEDED( hr ) ) { hr = depthFrameDescription->get_Width( &depthWidth ); } if ( SUCCEEDED( hr ) ) { hr = depthFrameDescription->get_Height( &depthHeight ); } if ( SUCCEEDED( hr ) ) { hr = depthFrame->get_DepthMinReliableDistance( &depthMinReliableDistance ); } if ( SUCCEEDED( hr ) ) { hr = depthFrame->get_DepthMaxReliableDistance( &depthMaxReliableDistance ); } if ( SUCCEEDED( hr ) ) { hr = depthFrame->AccessUnderlyingBuffer( &depthBufferSize, &depthBuffer ); } if ( SUCCEEDED( hr ) ) { Channel16u depthChannel = Channel16u( depthWidth, depthHeight, depthWidth * sizeof( uint16_t ), 1, depthBuffer ); mFrame.mChannelDepth = Channel16u( depthWidth, depthHeight ); mFrame.mChannelDepth.copyFrom( depthChannel, depthChannel.getBounds() ); console( ) << "Depth\n\twidth: " << depthWidth << "\n\theight: " << depthHeight << endl; } } if ( mDeviceOptions.isInfraredEnabled() ) { if ( SUCCEEDED( hr ) ) { hr = infraredFrame->get_FrameDescription( &infraredFrameDescription ); } if ( SUCCEEDED( hr ) ) { hr = infraredFrameDescription->get_Width( 
&infraredWidth ); } if ( SUCCEEDED( hr ) ) { hr = infraredFrameDescription->get_Height( &infraredHeight ); } if ( SUCCEEDED( hr ) ) { hr = infraredFrame->AccessUnderlyingBuffer( &infraredBufferSize, &infraredBuffer ); } if ( SUCCEEDED( hr ) ) { Channel16u infraredChannel = Channel16u( infraredWidth, infraredHeight, infraredWidth * sizeof( uint16_t ), 1, infraredBuffer ); mFrame.mChannelInfrared = Channel16u( infraredWidth, infraredHeight ); mFrame.mChannelInfrared.copyFrom( infraredChannel, infraredChannel.getBounds() ); console( ) << "Infrared\n\twidth: " << infraredWidth << "\n\theight: " << infraredHeight << endl; } } if ( mDeviceOptions.isInfraredLongExposureEnabled() ) { if ( SUCCEEDED( hr ) ) { hr = infraredLongExposureFrame->get_FrameDescription( &infraredLongExposureFrameDescription ); } if ( SUCCEEDED( hr ) ) { hr = infraredLongExposureFrameDescription->get_Width( &infraredLongExposureWidth ); } if ( SUCCEEDED( hr ) ) { hr = infraredLongExposureFrameDescription->get_Height( &infraredLongExposureHeight ); } if ( SUCCEEDED( hr ) ) { hr = infraredLongExposureFrame->AccessUnderlyingBuffer( &infraredLongExposureBufferSize, &infraredLongExposureBuffer ); } if ( SUCCEEDED( hr ) ) { Channel16u infraredLongExposureChannel = Channel16u( infraredLongExposureWidth, infraredLongExposureHeight, infraredLongExposureWidth * sizeof( uint16_t ), 1, infraredLongExposureBuffer ); mFrame.mChannelInfraredLongExposure = Channel16u( infraredLongExposureWidth, infraredLongExposureHeight ); mFrame.mChannelInfraredLongExposure.copyFrom( infraredLongExposureChannel, infraredLongExposureChannel.getBounds() ); int64_t irLongExpTime = 0; hr = infraredLongExposureFrame->get_RelativeTime( &irLongExpTime ); console( ) << "Infrared Long Exposure\n\twidth: " << infraredLongExposureWidth << "\n\theight: " << infraredLongExposureHeight; if ( SUCCEEDED( hr ) ) { console() << "\n\ttimestamp: " << irLongExpTime; } console() << endl; } } if ( SUCCEEDED( hr ) ) { // TODO build Kinect2::Frame from buffers, data mFrame.mTimeStamp = time; } if ( bodyFrameDescription != 0 ) { bodyFrameDescription->Release(); bodyFrameDescription = 0; } if ( bodyIndexFrameDescription != 0 ) { bodyIndexFrameDescription->Release(); bodyIndexFrameDescription = 0; } if ( colorFrameDescription != 0 ) { colorFrameDescription->Release(); colorFrameDescription = 0; } if ( depthFrameDescription != 0 ) { depthFrameDescription->Release(); depthFrameDescription = 0; } if ( infraredFrameDescription != 0 ) { infraredFrameDescription->Release(); infraredFrameDescription = 0; } if ( infraredLongExposureFrameDescription != 0 ) { infraredLongExposureFrameDescription->Release(); infraredLongExposureFrameDescription = 0; } } if ( audioFrame != 0 ) { audioFrame->Release(); audioFrame = 0; } if ( bodyFrame != 0 ) { bodyFrame->Release(); bodyFrame = 0; } if ( bodyIndexFrame != 0 ) { bodyIndexFrame->Release(); bodyIndexFrame = 0; } if ( colorFrame != 0 ) { colorFrame->Release(); colorFrame = 0; } if ( depthFrame != 0 ) { depthFrame->Release(); depthFrame = 0; } if ( frame != 0 ) { frame->Release(); frame = 0; } if ( infraredFrame != 0 ) { infraredFrame->Release(); infraredFrame = 0; } if ( infraredLongExposureFrame != 0 ) { infraredLongExposureFrame->Release(); infraredLongExposureFrame = 0; } }
void KinectManager::Update(void) { if (!m_pColorFrameReader) { return; } IColorFrame* pColorFrame = NULL; HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame); if (SUCCEEDED(hr)) { INT64 nTime = 0; IFrameDescription* pFrameDescription = NULL; int nWidth = 0; int nHeight = 0; ColorImageFormat imageFormat = ColorImageFormat_None; hr = pColorFrame->get_RelativeTime(&nTime); if (SUCCEEDED(hr)) { hr = pColorFrame->get_FrameDescription(&pFrameDescription); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Width(&nWidth); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_Height(&nHeight); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_BytesPerPixel(&color_frame_bytes_per_pixel); } if (SUCCEEDED(hr)) { hr = pFrameDescription->get_LengthInPixels(&color_frame_length_in_pixels); } if (SUCCEEDED(hr)) { hr = pColorFrame->get_RawColorImageFormat(&imageFormat); } if (SUCCEEDED(hr)) { if (imageFormat == ColorImageFormat_Rgba) { UINT nBufferSize = KINECT_STREAM_COLOR_WIDTH * KINECT_STREAM_COLOR_HEIGHT * color_frame_bytes_per_pixel; // note: this re-points m_pColorRGBX at the frame's internal buffer, which is only valid until the frame is released hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&m_pColorRGBX)); } else if (m_pColorRGBX) { IFrameDescription* pRGBFrameDescription = NULL; pColorFrame->CreateFrameDescription(ColorImageFormat_Rgba, &pRGBFrameDescription); unsigned int clpp = 0; pRGBFrameDescription->get_BytesPerPixel(&clpp); unsigned int clip = 0; pRGBFrameDescription->get_LengthInPixels(&clip); color_frame_bytes_per_pixel = clpp; color_frame_length_in_pixels = clip; UINT nBufferSize = clip * clpp; // copy into the pre-allocated buffer (pass the pointer itself, not its address) hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(m_pColorRGBX), ColorImageFormat_Rgba); SafeRelease(pRGBFrameDescription); } else { hr = E_FAIL; } } SafeRelease(pFrameDescription); } else{ DumpHR(hr); } SafeRelease(pColorFrame); }
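The KINECT_STREAM_COLOR_* constants and the m_pColorRGBX member are declared elsewhere; the copy branch above assumes a buffer pre-allocated for the full 1920x1080 Kinect v2 color stream, roughly like this:

static const int KINECT_STREAM_COLOR_WIDTH  = 1920;   // Kinect v2 color stream width
static const int KINECT_STREAM_COLOR_HEIGHT = 1080;   // Kinect v2 color stream height

// allocated once (e.g. in the constructor) and freed with delete[] in the destructor
RGBQUAD* m_pColorRGBX = new RGBQUAD[KINECT_STREAM_COLOR_WIDTH * KINECT_STREAM_COLOR_HEIGHT];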
int _tmain( int argc, _TCHAR* argv[] ) { cv::setUseOptimized( true ); // Sensor IKinectSensor* pSensor; HRESULT hResult = S_OK; hResult = GetDefaultKinectSensor( &pSensor ); if( FAILED( hResult ) ){ std::cerr << "Error : GetDefaultKinectSensor" << std::endl; return -1; } hResult = pSensor->Open(); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::Open()" << std::endl; return -1; } // Source IBodyIndexFrameSource* pBodyIndexSource; hResult = pSensor->get_BodyIndexFrameSource( &pBodyIndexSource ); if( FAILED( hResult ) ){ std::cerr << "Error : IKinectSensor::get_BodyIndexFrameSource()" << std::endl; return -1; } // Reader IBodyIndexFrameReader* pBodyIndexReader; hResult = pBodyIndexSource->OpenReader( &pBodyIndexReader ); if( FAILED( hResult ) ){ std::cerr << "Error : IBodyIndexFrameSource::OpenReader()" << std::endl; return -1; } // Description IFrameDescription* pDescription; hResult = pBodyIndexSource->get_FrameDescription( &pDescription ); if( FAILED( hResult ) ){ std::cerr << "Error : IBodyIndexFrameSource::get_FrameDescription()" << std::endl; return -1; } int width = 0; int height = 0; pDescription->get_Width( &width ); // 512 pDescription->get_Height( &height ); // 424 cv::Mat bodyIndexMat( height, width, CV_8UC3 ); cv::namedWindow( "BodyIndex" ); // Color Table cv::Vec3b color[BODY_COUNT]; color[0] = cv::Vec3b( 255, 0, 0 ); color[1] = cv::Vec3b( 0, 255, 0 ); color[2] = cv::Vec3b( 0, 0, 255 ); color[3] = cv::Vec3b( 255, 255, 0 ); color[4] = cv::Vec3b( 255, 0, 255 ); color[5] = cv::Vec3b( 0, 255, 255 ); while( 1 ){ // Frame IBodyIndexFrame* pBodyIndexFrame = nullptr; hResult = pBodyIndexReader->AcquireLatestFrame( &pBodyIndexFrame ); if( SUCCEEDED( hResult ) ){ unsigned int bufferSize = 0; unsigned char* buffer = nullptr; hResult = pBodyIndexFrame->AccessUnderlyingBuffer( &bufferSize, &buffer ); if( SUCCEEDED( hResult ) ){ for( int y = 0; y < height; y++ ){ for( int x = 0; x < width; x++ ){ unsigned int index = y * width + x; if( buffer[index] != 0xff ){ bodyIndexMat.at<cv::Vec3b>( y, x ) = color[buffer[index]]; } else{ bodyIndexMat.at<cv::Vec3b>( y, x ) = cv::Vec3b( 0, 0, 0 ); } } } } } SafeRelease( pBodyIndexFrame ); cv::imshow( "BodyIndex", bodyIndexMat ); if( cv::waitKey( 30 ) == VK_ESCAPE ){ break; } } SafeRelease( pBodyIndexSource ); SafeRelease( pBodyIndexReader ); SafeRelease( pDescription ); if( pSensor ){ pSensor->Close(); } SafeRelease( pSensor ); cv::destroyAllWindows(); return 0; }