Example #1
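Reads the Kinect color and depth streams through the kinect::nui C++ wrapper and, using OpenCV's legacy C API, displays the color image with the distance (in mm) to the object at the image center drawn on top.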
int main()
{
    try {
        kinect::nui::Kinect kinect;
        kinect.Initialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH );

        kinect::nui::ImageStream& video = kinect.VideoStream();
        video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );

        kinect::nui::ImageStream& depth = kinect.DepthStream();
        depth.Open( NUI_IMAGE_TYPE_DEPTH, NUI_IMAGE_RESOLUTION_640x480 );

        // OpenCV setup
        const char* windowName = "depth";
        ::cvNamedWindow( windowName );
        cv::Ptr< IplImage > videoImg = ::cvCreateImage( cvSize(video.Width(), video.Height()), IPL_DEPTH_8U, 4 );

        // Create the font used for drawing (hscale, vscale, shear = 0, thickness = 2)
        CvFont font;
        ::cvInitFont( &font, CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2 );

        while ( 1 ) {
            // Wait for new data
            kinect.WaitAndUpdateAll();

            // Get the next frame's data (OpenNI-style)
            kinect::nui::ImageFrame imageMD( video );
            kinect::nui::DepthFrame depthMD( depth );

            // Copy the frame data for display
            memcpy( videoImg->imageData, (BYTE*)imageMD.Bits(), videoImg->widthStep * videoImg->height );

            // Measure and display the distance to the object at the image center
            CvPoint point = cvPoint( depthMD.Width() / 2, depthMD.Height() / 2);
            ::cvCircle( videoImg, point, 10, cvScalar(0, 0, 0), -1 );

            std::stringstream ss;
            ss << depthMD( point.x, point.y ) << "mm";
            ::cvPutText( videoImg, ss.str().c_str(), point, &font, cvScalar(0, 0, 255) );

            ::cvShowImage( windowName, videoImg );

            int key = ::cvWaitKey( 10 );
            if ( key == 'q' ) {
                break;
            }
        }

        ::cvDestroyAllWindows();
    }
    catch ( std::exception& ex ) {
        std::cout << ex.what() << std::endl;
    }
}
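Example #2
Opens the depth-and-player-index stream and colorizes the color image per detected player: every 32-bit color pixel is masked with an entry from a colors table (defined outside this snippet), selected by the player index stored in the low bits of the depth pixel.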
int main()
{
    try {
        kinect::nui::Kinect kinect;
        kinect.Initialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );

        kinect::nui::ImageStream& video = kinect.VideoStream();
        video.Open( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480 );

        kinect::nui::ImageStream& depth = kinect.DepthStream();
        depth.Open( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240 );

        // OpenCV setup
        const char* windowName = "player";
        ::cvNamedWindow( windowName );
        cv::Ptr< IplImage > playerImg = ::cvCreateImage( cvSize(video.Width(), video.Height()), IPL_DEPTH_8U, 4 );

        while ( 1 ) {
            // Wait for new data
            kinect.WaitAndUpdateAll();

            // Get the next frame's data (OpenNI-style)
            kinect::nui::VideoFrame videoMD( video );
            kinect::nui::DepthFrame depthMD( depth );

            // Mask each color pixel with the player color; colors is an
            // array of 8 ARGB masks defined elsewhere in the original sample
            UINT* img = (UINT*)playerImg->imageData;
            for ( int y = 0; y < videoMD.Height(); ++y ) {
                for ( int x = 0; x < videoMD.Width(); ++x, ++img ) {
                    *img = videoMD( x, y ) & colors[depthMD( x / 2, y / 2 ) & 0x7];
                }
            }

            ::cvShowImage( windowName, playerImg );

            int key = ::cvWaitKey( 10 );
            if ( key == 'q' ) {
                break;
            }
        }

        ::cvDestroyAllWindows();
    }
    catch ( std::exception& ex ) {
        std::cout << ex.what() << std::endl;
    }
}
Example #3
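From the cisst osaOpenNI wrapper: fills a matrix of 3-D points by building projective (pixel, depth) coordinates for the whole depth map and converting them to world coordinates with ConvertProjectiveToRealWorld, scaled from millimeters to meters.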
osaOpenNI::Errno osaOpenNI::GetRangeData(vctDynamicMatrixRef<vctFloat3> rangedata)
{
    // Get data
    xn::DepthMetaData depthMD;
    Data->depthgenerator.GetMetaData(depthMD);

    const unsigned int width  = depthMD.XRes();
    const unsigned int height = depthMD.YRes();

    // check image size
    if (rangedata.cols() != width ||
        rangedata.rows() != height) {
        CMN_LOG_RUN_ERROR << "cisstOpenNI::GetRangeData - data size mismatch" << std::endl;
        return osaOpenNI::EFAILURE;
    }

    // grow the temporary buffers if needed
    const unsigned int size = width * height;
    if (PointsBufferSize < size) {
        if (ProjectivePointsBuffer) delete[] ProjectivePointsBuffer;
        if (WorldPointsBuffer)      delete[] WorldPointsBuffer;
        ProjectivePointsBuffer = new char[size * sizeof(XnPoint3D)];
        WorldPointsBuffer      = new char[size * sizeof(XnPoint3D)];
        PointsBufferSize       = size;
        if (!ProjectivePointsBuffer || !WorldPointsBuffer) {
            CMN_LOG_RUN_ERROR << "cisstOpenNI::GetRangeData - failed to allocate data buffer" << std::endl;
            return osaOpenNI::EFAILURE;
        }
    }

    XnPoint3D* proj = reinterpret_cast<XnPoint3D*>(ProjectivePointsBuffer);
    XnPoint3D* wrld = reinterpret_cast<XnPoint3D*>(WorldPointsBuffer);

    // create projective coordinates
    unsigned int i, x, y;
    for(i = 0, x = 0; x < width; x ++) {
        for(y = 0; y < height; i ++, y ++) {
            proj[i].X = static_cast<XnFloat>(x);
            proj[i].Y = static_cast<XnFloat>(y);
            proj[i].Z = static_cast<XnFloat>(depthMD(x, y));
        }
    }

    // convert projective to 3D
    XnStatus status = Data->depthgenerator.ConvertProjectiveToRealWorld(size, proj, wrld);
    if (status != XN_STATUS_OK) {
        CMN_LOG_RUN_ERROR << "cisstOpenNI::GetRangeData - failed to convert projective to world: "
                          << xnGetStatusString( status ) << std::endl;
    }

    for (i = 0, x = 0; x < width; x ++) {
        for (y = 0; y < height; i ++, y ++) {
            rangedata.Element(y, x).Assign(-wrld[i].X / 1000.0,
                                            wrld[i].Y / 1000.0,
                                            wrld[i].Z / 1000.0);
        }
    }

    return osaOpenNI::ESUCCESS;
}
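Example #4
Builds a colored PCL point cloud from the depth and image maps, transforms each point from the camera frame into the world frame using the robot's pose (r_world_x, r_world_y, r_world_rad), and accumulates the result in a CloudViewer.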
void CPointCloud::Save6D(int r_world_x,int r_world_y,double r_world_rad)
{
	// r_world_x, r_world_y give the robot's position in the world coordinate frame

	// Prepare the point cloud
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr points(new pcl::PointCloud<pcl::PointXYZRGB>);
	points->width = IMAGE_WIDTH;
	points->height = IMAGE_HEIGHT;

	// Fill the point cloud
	for(int j=0 ; j<imageMD.YRes() ; j++)
	{
		for(int i=0; i<imageMD.XRes() ;i++)
		{
			if(999 < depthMD(i,j) && depthMD(i,j) < 2000)	// keep points between 1 m and 2 m
			{
				double camera_x,camera_y,camera_z;
				pcl::PointXYZRGB point;
				// camera_x/y/z: coordinates in the camera frame, seen from the robot
				camera_x =  PIXEL_RANGE * depthMD(i,j) * (i-(double)(imageMD.XRes())/2);
				camera_y =  PIXEL_RANGE * depthMD(i,j) * ((double)(imageMD.YRes())/2-j);
				camera_z =  depthMD(i,j);

				// world_x/y/z: coordinates in the world frame
				double world_x = camera_x * cos(r_world_rad) - camera_z * sin(r_world_rad) + r_world_x;
				double world_y = camera_x * sin(r_world_rad) + camera_z * cos(r_world_rad) + r_world_y;
				double world_z =  camera_y + CAMERA_HIGHT;

				// Map world coordinates onto the viewer's axes (y up)
				point.x = world_x;
				point.y = world_z;
				point.z = - world_y;

				// Texture: copy the BGR color from the camera image
				point.b = camera->imageData[camera->widthStep*j+i*camera->nChannels];
				point.g = camera->imageData[camera->widthStep*j+i*camera->nChannels+1];
				point.r = camera->imageData[camera->widthStep*j+i*camera->nChannels+2];
		
				points->push_back(point);
			}
		}
	}
	if(pcl_viewer_flag == false)
	{
		cloud = points;
		pcl_viewer_flag = true;
	}else{
		*cloud += *points;
	}
	viewer->showCloud(cloud);
}
Example #5
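Plain OpenNI: initializes a context from an XML file, aligns the depth viewpoint with the image, then whites out every pixel farther than 1000 mm before showing the frame with OpenCV.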
int main (int argc, char * argv[])
{
  IplImage* camera = 0;

  try {
    // Create the context
    xn::Context context;
    XnStatus rc = context.InitFromXmlFile(CONFIG_XML_PATH);
    if (rc != XN_STATUS_OK) {
      throw std::runtime_error(xnGetStatusString(rc));
    }

    // Get the image generator
    xn::ImageGenerator image;
    rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
    if (rc != XN_STATUS_OK) {
      throw std::runtime_error(xnGetStatusString(rc));
    }

    // Get the depth generator
    xn::DepthGenerator depth;
    rc = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    if (rc != XN_STATUS_OK) {
      throw std::runtime_error(xnGetStatusString(rc));
    }

    // Align the depth map's viewpoint with the image
    xn::AlternativeViewPointCapability viewPoint =
                                depth.GetAlternativeViewPointCap();
    viewPoint.SetViewPoint(image);

    // Create a camera-sized image (8-bit RGB)
    xn::ImageMetaData imageMD;
    image.GetMetaData(imageMD);
    camera = ::cvCreateImage(cvSize(imageMD.XRes(), imageMD.YRes()),
      IPL_DEPTH_8U, 3);
    if (!camera) {
      throw std::runtime_error("error : cvCreateImage");
    }

    // Main loop
    while (1) {
      // Wait for all updates, then grab the image and depth data
      context.WaitAndUpdateAll();

      xn::ImageMetaData imageMD;
      image.GetMetaData(imageMD);

      xn::DepthMetaData depthMD;
      depth.GetMetaData(depthMD);

      // Build the depth histogram
      depth_hist depthHist = getDepthHistgram(depth, depthMD);

      // White out everything beyond a certain distance
      xn::RGB24Map& rgb = imageMD.WritableRGB24Map();
      for (XnUInt y = 0; y < imageMD.YRes(); ++y) {
        for (XnUInt x = 0; x < imageMD.XRes(); ++x) {
          const XnDepthPixel& distance = depthMD(x, y);   // avoid shadowing the depth generator
          if (distance > 1000) {
            XnRGB24Pixel& pixel = rgb(x, y);
            pixel.nRed   = 255;
            pixel.nGreen = 255;
            pixel.nBlue  = 255;
          }
        }
      }

      // Display the image (RGB to BGR for OpenCV)
      memcpy(camera->imageData, imageMD.RGB24Data(), camera->imageSize);
      ::cvCvtColor(camera, camera, CV_RGB2BGR);
      ::cvShowImage("KinectImage", camera);

      // Quit on 'q'
      char key = cvWaitKey(10);
      if (key == 'q') {
        break;
      }
    }
  }
  catch (std::exception& ex) {
    std::cout << ex.what() << std::endl;
  }

  ::cvReleaseImage(&camera);

  return 0;
}
Example #6
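A variant of Example #3's osaOpenNI::GetRangeData that converts either the whole depth map or only a caller-supplied list of pixels, returning the world coordinates as a 3xN matrix in meters.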
osaOpenNI::Errno
osaOpenNI::GetRangeData(vctDynamicMatrix<double>& rangedata,
                        const std::vector< vctFixedSizeVector<unsigned short,2> >& pixels) {

    // Get data
    xn::DepthMetaData depthMD;
    Data->depthgenerator.GetMetaData(depthMD);

    // create arrays
    XnUInt32 cnt;
    XnPoint3D* proj = NULL;
    XnPoint3D* wrld = NULL;

    if (pixels.empty()) {

        // create arrays
        cnt = depthMD.XRes()*depthMD.YRes();
        proj = new XnPoint3D[ cnt ];
        wrld = new XnPoint3D[ cnt ];

        // Create projective coordinates
        for (size_t i=0, x=0; x<depthMD.XRes(); x++) {
            for (size_t y=0; y<depthMD.YRes(); i++, y++) {
                proj[i].X = (XnFloat)x;
                proj[i].Y = (XnFloat)y;
                proj[i].Z = depthMD(x,y);
            }
        }

    }
    else{

        // create arrays
        cnt = pixels.size();
        proj = new XnPoint3D[ cnt ];
        wrld = new XnPoint3D[ cnt ];

        for (size_t i=0; i<pixels.size(); i++) {
            unsigned int x = pixels[i][0];
            unsigned int y = pixels[i][1];
            proj[i].X = (XnFloat)x;
            proj[i].Y = (XnFloat)y;
            proj[i].Z = depthMD(x,y);
        }

    }

    // Convert projective to 3D
    XnStatus status = Data->depthgenerator.ConvertProjectiveToRealWorld(cnt, proj, wrld);
    if (status != XN_STATUS_OK) {
        CMN_LOG_RUN_ERROR << "Failed to convert projective to world: "
                          << xnGetStatusString(status) << std::endl;
    }

    // create matrix
    rangedata.SetSize(3, cnt);
    for (size_t i=0; i<cnt; i++) {
        rangedata[0][i] = -wrld[i].X/1000.0;
        rangedata[1][i] =  wrld[i].Y/1000.0;
        rangedata[2][i] =  wrld[i].Z/1000.0;
    }

    delete[] proj;
    delete[] wrld;

    return osaOpenNI::ESUCCESS;
}