Example No. 1
void readFrame()
{
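	// Drain all pending frames: wait with a zero timeout and read from the
	// stream that reported new data, until no stream has a frame waiting.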
	openni::Status rc = openni::STATUS_OK;

	openni::VideoStream* streams[] = {&g_depthStream, &g_colorStream, &g_irStream};

	int changedIndex = -1;
	while (rc == openni::STATUS_OK)
	{
		rc = openni::OpenNI::waitForAnyStream(streams, 3, &changedIndex, 0);
		if (rc == openni::STATUS_OK)
		{
			switch (changedIndex)
			{
			case 0:
				g_depthStream.readFrame(&g_depthFrame); break;
			case 1:
				g_colorStream.readFrame(&g_colorFrame); break;
			case 2:
				g_irStream.readFrame(&g_irFrame); break;
			default:
				printf("Error in wait\n");
			}
		}
	}
}
Example No. 2
void KinectCamera::paint(QPainter *painter)
{
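    // Render one frame of the selected stream. On the first call the device is
    // opened via startcamera(); depth frames are scaled from 16-bit to an 8-bit
    // grayscale QImage, while color frames are drawn directly as RGB888.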

    if (!fig)    // if the device has not been opened yet, call startcamera() first
    {
        startcamera();
          if(m_streamsource=="depth")
           {
              int iMaxDepth = mDepthStream.getMaxPixelValue();
              mDepthStream.readFrame( &mDepthFrame );
              const cv::Mat mImageDepth(
                          mDepthFrame.getHeight(), mDepthFrame.getWidth(),
                          CV_16UC1, (void*)mDepthFrame.getData() );
              cv::Mat mScaledDepth;
              mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );
              QVector<QRgb>  colorTable;
              for(int k=0;k<256;++k)
              {
                  colorTable.push_back( qRgb(k,k,k) );
              }
              KinectDepthImage= QImage((const unsigned char*)mScaledDepth.data,mDepthFrame.getWidth(), mDepthFrame.getHeight(),QImage::Format_Indexed8);
              KinectDepthImage.setColorTable(colorTable);
              painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectDepthImage);
          }
          else
          {
              mColorStream.readFrame( &mColorFrame );
              KinectColorImage= QImage((const unsigned char*)mColorFrame.getData(),mColorFrame.getWidth(), mColorFrame.getHeight(),QImage::Format_RGB888);
              painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectColorImage);
          }
    }
    else    // the device is already open, so grab and draw the frame directly
    {
        if(m_streamsource=="depth")
        {
            int iMaxDepth = mDepthStream.getMaxPixelValue();
            mDepthStream.readFrame( &mDepthFrame );
            const cv::Mat mImageDepth(
                        mDepthFrame.getHeight(), mDepthFrame.getWidth(),
                        CV_16UC1, (void*)mDepthFrame.getData() );
            cv::Mat mScaledDepth;
            mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );
            QVector<QRgb>  colorTable;
            for(int k=0;k<256;++k)
            {
                colorTable.push_back( qRgb(k,k,k) );
            }
            KinectDepthImage= QImage((const unsigned char*)mScaledDepth.data,mDepthFrame.getWidth(), mDepthFrame.getHeight(),QImage::Format_Indexed8);
            KinectDepthImage.setColorTable(colorTable);
            painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectDepthImage);
        }
        else
        {
            mColorStream.readFrame( &mColorFrame );
            KinectColorImage= QImage((const unsigned char*)mColorFrame.getData(),mColorFrame.getWidth(), mColorFrame.getHeight(),QImage::Format_RGB888);
            painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectColorImage);
        }

    }
}
Example No. 3
void ColorListener::onNewFrame(openni::VideoStream& vs)
{
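    // Read the new frame, append it to the frame queue and, if requested,
    // trigger a repaint of the associated widget.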
    openni::VideoFrameRef frame;
    vs.readFrame(&frame);
    frames->push_back(frame);
    if(isUpdate) w->update();
}
Example No. 4
void pcl::io::OpenNI2Grabber::processIRFrame (openni::VideoStream& stream)
{
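  // Wrap the incoming OpenNI frame and forward it to the registered IR callback.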
  openni::VideoFrameRef frame;
  stream.readFrame (&frame);

  FrameWrapper::Ptr frameWrapper = boost::make_shared<Openni2FrameWrapper>(frame);

  boost::shared_ptr<IRImage> image = boost::make_shared<IRImage> ( frameWrapper );

  irCallback (image, NULL);
}
Example No. 5
void ColorStreamListener::onNewFrame(openni::VideoStream& stream){
	//Log::i( TAG, "onNewFrame");
	// Read the new frame and, if it is a valid color frame, wrap it in a
	// cv::Mat and hand it to the color device.
	stream.readFrame(&(this->Frame));
	if (Frame.isValid()){
		if ( openni::SENSOR_COLOR == Frame.getSensorType() ){
			//cv::Mat mColorMat_BGR;
			this->colorMat = new cv::Mat(Frame.getHeight(),Frame.getWidth(),CV_8UC3,(void*)Frame.getData());
			//cv::cvtColor(mColorMat,mColorMat_BGR,CV_RGB2BGR);
			this->mColorDevice->setData(*(this->colorMat));
		}/* End of if */
	}/* End of if */
}/* End of onNewFrame */
Example No. 6
void seekStream(openni::VideoStream* pStream, openni::VideoFrameRef* pCurFrame, int frameId)
{
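    // Seek the given stream to frameId and then read one frame from every open
    // stream, so the displayed images stay in sync with the new position.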
    int numberOfFrames = 0;

    // Get number of frames
    numberOfFrames = g_pPlaybackControl->getNumberOfFrames(*pStream);

    // Seek
    openni::Status rc = g_pPlaybackControl->seek(*pStream, frameId);
    if (rc == openni::STATUS_OK)
    {
        // Read next frame from all streams.
        if (g_bIsDepthOn)
        {
            g_depthStream.readFrame(&g_depthFrame);
        }
        if (g_bIsColorOn)
        {
            g_colorStream.readFrame(&g_colorFrame);
        }
        if (g_bIsIROn)
        {
            g_irStream.readFrame(&g_irFrame);
        }

        // the new frameId might be different than expected (due to clipping to edges)
        frameId = pCurFrame->getFrameIndex();

        displayMessage("Current frame: %u/%u", frameId, numberOfFrames);
    }
    else if ((rc == openni::STATUS_NOT_IMPLEMENTED) || (rc == openni::STATUS_NOT_SUPPORTED) || (rc == openni::STATUS_BAD_PARAMETER) || (rc == openni::STATUS_NO_DEVICE))
    {
        displayError("Seeking is not supported");
    }
    else
    {
        displayError("Error seeking to frame:\n%s", openni::OpenNI::getExtendedError());
    }
}
Example No. 7
void pcl::io::OpenNI2Grabber::processDepthFrame (openni::VideoStream& stream)
{
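  // Wrap the incoming depth frame, attach the camera parameters (baseline,
  // focal length, shadow/no-sample values) and forward it to the depth callback.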
  openni::VideoFrameRef frame;
  stream.readFrame (&frame);
  FrameWrapper::Ptr frameWrapper = boost::make_shared<Openni2FrameWrapper>(frame);

  float focalLength = device_->getDepthFocalLength ();

  float baseline = device_->getBaseline();
  pcl::uint64_t no_sample_value = device_->getShadowValue();
  pcl::uint64_t shadow_value = no_sample_value;
  
  boost::shared_ptr<DepthImage> image  = 
   boost::make_shared<DepthImage> (frameWrapper, baseline, focalLength, shadow_value, no_sample_value);

  depthCallback (image, NULL);
}
Example No. 8
// Convert VideoFrameRef into pcl::Image and forward to registered callbacks
void pcl::io::OpenNI2Grabber::processColorFrame (openni::VideoStream& stream)
{
  Image::Timestamp t_callback = Image::Clock::now ();

  openni::VideoFrameRef frame;
  stream.readFrame (&frame);
  FrameWrapper::Ptr frameWrapper = boost::make_shared<Openni2FrameWrapper>(frame);

  openni::PixelFormat format = frame.getVideoMode ().getPixelFormat ();
  boost::shared_ptr<Image> image;

  // Convert frame to PCL image type, based on pixel format
  if (format == openni::PIXEL_FORMAT_YUV422)
    image = boost::make_shared<ImageYUV422> (frameWrapper, t_callback);
  else //if (format == PixelFormat::PIXEL_FORMAT_RGB888)
    image = boost::make_shared<ImageRGB24> (frameWrapper, t_callback);

  imageCallback (image, NULL);
}
Example No. 9
void streamFrameListener::onNewFrame(openni::VideoStream& stream)
{
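    // Copy the incoming frame into the local image buffer while holding the
    // mutex, after validating the pixel format and checking that the sizes match.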
    LockGuard guard(mutex);
    stream.readFrame(&frameRef);

    if (!frameRef.isValid() || !frameRef.getData())
    {
        yInfo() << "frame lost";
        return;
    }

    int pixC;

    pixF     = stream.getVideoMode().getPixelFormat();
    pixC     = depthCameraDriver::pixFormatToCode(pixF);
    w        = frameRef.getWidth();
    h        = frameRef.getHeight();
    dataSize = frameRef.getDataSize();

    if (isReady == false)
    {
        isReady = true;
    }

    if(pixC == VOCAB_PIXEL_INVALID)
    {
        yError() << "depthCameraDriver: Pixel Format not recognized";
        return;
    }

    image.setPixelCode(pixC);
    image.resize(w, h);

    if(image.getRawImageSize() != frameRef.getDataSize())
    {
        yError() << "depthCameraDriver:device and local copy data size doesn't match";
        return;
    }

    memcpy((void*)image.getRawImage(), (void*)frameRef.getData(), frameRef.getDataSize());
    stamp.update();
    return;
}
Example No. 10
	void onNewFrame( openni::VideoStream& rStream )
	{
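		// Forward every frame read from the real stream into the virtual stream
		// by copying its data, frame index and timestamp.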
		openni::VideoFrameRef mFrame;
		// read frame from real video stream
		if( rStream.readFrame( &mFrame ) == openni::STATUS_OK )
		{
			// get a frame from the virtual video stream
			OniFrame* pFrame = NULL;
			if( m_rVStream.invoke( GET_VIRTUAL_STREAM_IMAGE, pFrame ) == openni::STATUS_OK )
			{
				memcpy( pFrame->data, mFrame.getData(), mFrame.getDataSize() );

				pFrame->frameIndex = mFrame.getFrameIndex();
				pFrame->timestamp = mFrame.getTimestamp();

				// write the data back to the virtual video stream
				m_rVStream.invoke( SET_VIRTUAL_STREAM_IMAGE, pFrame );
			}
		}
		mFrame.release();
	}
Example No. 11
  bool grabFrame(cv::Mat& color) {
    int changed_index;
    auto status = openni::OpenNI::waitForAnyStream(streams.data(), 1, &changed_index);
    if (status != openni::STATUS_OK)
      return false;

    color_stream.readFrame(&color_frame);
    if (!color_frame.isValid())
      return false;

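    // Copy the RGB frame into the caller's buffer, swapping the channel order
    // to BGR as OpenCV expects.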
    auto tgt = color.data;
    auto src = reinterpret_cast<const uint8_t*>(color_frame.getData());
    for (size_t i = 0; i < color.total(); ++i) {
      *tgt++ = *(src + 2);
      *tgt++ = *(src + 1);
      *tgt++ = *(src + 0);
      src += 3;
    }

    ++next_frame_index;
    return true;
  }
Example No. 12
void OpenNI2FrameListener::onNewFrame(openni::VideoStream& stream)
{
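  // Convert the OpenNI frame into a sensor_msgs::Image: stamp it with either
  // ROS time or a median-filtered device timestamp, copy the raw pixel data
  // and derive the encoding and row step from the stream's pixel format.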
  stream.readFrame(&m_frame);

  if (m_frame.isValid() && callback_)
  {
    sensor_msgs::ImagePtr image(new sensor_msgs::Image);

    ros::Time ros_now = ros::Time::now();

    if (!user_device_timer_)
    {
      image->header.stamp = ros_now;

      ROS_DEBUG("Time interval between frames: %.4f ms", (float)((ros_now.toSec()-prev_time_stamp_)*1000.0));

      prev_time_stamp_ = ros_now.toSec();
    } else
    {
      uint64_t device_time = m_frame.getTimestamp();

      double device_time_in_sec = static_cast<double>(device_time)/1000000.0;
      double ros_time_in_sec = ros_now.toSec();

      double time_diff = ros_time_in_sec-device_time_in_sec;

      timer_filter_->addSample(time_diff);

      double filtered_time_diff = timer_filter_->getMedian();

      double corrected_timestamp = device_time_in_sec+filtered_time_diff;

      image->header.stamp.fromSec(corrected_timestamp);

      ROS_DEBUG("Time interval between frames: %.4f ms", (float)((corrected_timestamp-prev_time_stamp_)*1000.0));

      prev_time_stamp_ = corrected_timestamp;
    }

    image->width = m_frame.getWidth();
    image->height = m_frame.getHeight();

    std::size_t data_size = m_frame.getDataSize();

    image->data.resize(data_size);
    memcpy(&image->data[0], m_frame.getData(), data_size);

    image->is_bigendian = 0;

    const openni::VideoMode& video_mode = m_frame.getVideoMode();
    switch (video_mode.getPixelFormat())
    {
      case openni::PIXEL_FORMAT_DEPTH_1_MM:
        image->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
        image->step = sizeof(unsigned char) * 2 * image->width;
        break;
      case openni::PIXEL_FORMAT_DEPTH_100_UM:
        image->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
        image->step = sizeof(unsigned char) * 2 * image->width;
        break;
      case openni::PIXEL_FORMAT_SHIFT_9_2:
        image->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
        image->step = sizeof(unsigned char) * 2 * image->width;
        break;
      case openni::PIXEL_FORMAT_SHIFT_9_3:
        image->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
        image->step = sizeof(unsigned char) * 2 * image->width;
        break;

      case openni::PIXEL_FORMAT_RGB888:
        image->encoding = sensor_msgs::image_encodings::RGB8;
        image->step = sizeof(unsigned char) * 3 * image->width;
        break;
      case openni::PIXEL_FORMAT_YUV422:
        image->encoding = sensor_msgs::image_encodings::YUV422;
        image->step = sizeof(unsigned char) * 4 * image->width;
        break;
      case openni::PIXEL_FORMAT_GRAY8:
        image->encoding = sensor_msgs::image_encodings::MONO8;
        image->step = sizeof(unsigned char) * 1 * image->width;
        break;
      case openni::PIXEL_FORMAT_GRAY16:
        image->encoding = sensor_msgs::image_encodings::MONO16;
        image->step = sizeof(unsigned char) * 2 * image->width;
        break;
      case openni::PIXEL_FORMAT_JPEG:
      default:
        ROS_ERROR("Invalid image encoding");
        break;
    }

    callback_(image);
  }

}
Example No. 13
void seekFrame(int nDiff)
{
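	// Seek the currently active stream nDiff frames relative to its current
	// position, then refresh the frames of all open streams.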
	// Make sure seek is required.
	if (nDiff == 0)
	{
		return;
	}

	if (g_pPlaybackControl == NULL)
	{
		return;
	}

	int frameId = 0, numberOfFrames = 0;
	openni::VideoStream* pStream = NULL;
	openni::VideoFrameRef* pCurFrame;
	if (g_bIsDepthOn)
	{
		pCurFrame = &g_depthFrame;
		pStream = &g_depthStream;
	}
	else if (g_bIsColorOn)
	{
		pCurFrame = &g_colorFrame;
		pStream = &g_colorStream;
	}
	else if (g_bIsIROn)
	{
		pCurFrame = &g_irFrame;
		pStream = &g_irStream;
	}
	else
	{
		return;
	}
	frameId = pCurFrame->getFrameIndex();

	// Get number of frames
	numberOfFrames = g_pPlaybackControl->getNumberOfFrames(*pStream);

	// Calculate the new frame ID and seek stream.
	frameId = (frameId + nDiff < 1) ? 1 : frameId + nDiff;
	openni::Status rc = g_pPlaybackControl->seek(*pStream, frameId);
	if (rc == openni::STATUS_OK)
	{
		// Read next frame from all streams.
		if (g_bIsDepthOn)
		{
			g_depthStream.readFrame(&g_depthFrame);
		}
		if (g_bIsColorOn)
		{
			g_colorStream.readFrame(&g_colorFrame);
		}
		if (g_bIsIROn)
		{
			g_irStream.readFrame(&g_irFrame);
		}
		// the new frameId might be different than expected (due to clipping to edges)
		frameId = pCurFrame->getFrameIndex();

		displayMessage("Seeked to frame %u/%u", frameId, numberOfFrames);
	}
	else if ((rc == openni::STATUS_NOT_IMPLEMENTED) || (rc == openni::STATUS_NOT_SUPPORTED) || (rc == openni::STATUS_BAD_PARAMETER) || (rc == openni::STATUS_NO_DEVICE))
	{
		displayError("Seeking is not supported");
	}
	else
	{
		displayError("Error seeking to frame:\n%s", openni::OpenNI::getExtendedError());
	}
}