Example no. 1
  inline void cvt_oniimage(openni::VideoFrameRef src, image &to, const MemOp &m)
  {
    // The frame buffer is read-only; cast away const because the image
    // wrapper expects a mutable void*.
    const void* data = src.getData();
    void* datab = const_cast<void*>(data);
    to = image(src.getWidth(), src.getHeight(), src.getStrideInBytes(), datab, m);
    to.set_format(image::FORMAT_DEPTH_16);
  }
Example no. 2
void convert_depth_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};

    cv::Mat s1, s2;
    s1 = cv::Mat(2, sizes, CV_16UC1, (void*)data);
    // Stretch the raw 16-bit depth to 8 bits, then expand to three channels.
    cv::normalize(s1, s2, 0, 255, CV_MINMAX, CV_8UC1);
    cv::cvtColor(s2, out, CV_GRAY2BGR);
	
	/*
		const nite::UserId* pLabels = map.getPixels();
	for (int y=0; y<map.getHeight(); ++y)
	{
		for (int x=0;x<map.getWidth(); ++x, ++pLabels)
		{
			uint16_t &v = s1.at<uint16_t>(cv::Point(x,y));
			if (!*pLabels)
				v = 0;
		}
	}
*/
	
//	cv::normalize(s1, out, 0, 255, CV_MINMAX, CV_8UC1);
}
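A minimal sketch of how convert_depth_map might be driven, assuming a depth stream that has already been created and started (the setup code and the depthStream name are illustrative, not part of the snippet above):

#include <OpenNI.h>
#include <opencv2/opencv.hpp>

void showOneDepthFrame(openni::VideoStream& depthStream)
{
    openni::VideoFrameRef frame;
    if (depthStream.readFrame(&frame) != openni::STATUS_OK || !frame.isValid())
        return;

    cv::Mat display;
    convert_depth_map(frame, display);  // 8-bit BGR, stretched for viewing
    cv::imshow("depth", display);
    cv::waitKey(1);
}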
Example no. 3
void CGraph::Calculate(float* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
	const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
	int width = depthFrame.getWidth();
	int height = depthFrame.getHeight();
	memset(pHistogram, 0, histogramSize*sizeof(float));
	int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

	unsigned int nNumberOfPoints = 0;
	for (int y = 0; y < height; ++y)
	{
		for (int x = 0; x < width; ++x, ++pDepth)
		{
			if (*pDepth != 0)
			{
				pHistogram[*pDepth]++; // assumes *pDepth < histogramSize (no bounds check here)
				nNumberOfPoints++;
			}
		}
		pDepth += restOfRow;
	}
	for (int nIndex = 1; nIndex < histogramSize; nIndex++)
	{
		pHistogram[nIndex] += pHistogram[nIndex - 1];
	}
	if (nNumberOfPoints)
	{
		for (int nIndex = 1; nIndex < histogramSize; nIndex++)
		{
			pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
		}
	}
}
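A hypothetical caller sketch for Calculate: the histogram buffer is indexed by raw depth in millimeters, so it must cover the sensor's full range (the 10000 mm figure and the graph instance are assumptions, not taken from the snippet):

#include <vector>

void shadeFromHistogram(CGraph& graph, const openni::VideoFrameRef& depthFrame)
{
    const int kHistogramSize = 10001;  // raw depths up to 10000 mm, plus index 0
    std::vector<float> histogram(kHistogramSize);
    graph.Calculate(histogram.data(), kHistogramSize, depthFrame);
    // histogram[d] is now a brightness in [0, 256): the cumulative count is
    // inverted by 256 * (1 - cumulative/total), so nearer pixels come out brighter.
}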
Example no. 4
/*
* Function to compute the histogram of a depth image
*/
void calculateHistogram(int* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
	const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
	int* pHistogram_temp = new int[histogramSize];
	int width = depthFrame.getWidth();
	int height = depthFrame.getHeight();
	// Calculate the accumulative histogram (the yellow HandSegmentation...)
	memset(pHistogram, 0, histogramSize*sizeof(int));
	memset(pHistogram_temp, 0, histogramSize*sizeof(int));
	int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

	unsigned int nNumberOfPoints = 0;
	for (int y = 0; y < height; ++y)
	{
		for (int x = 0; x < width; ++x, ++pDepth)
		{
			if (*pDepth != 0 && *pDepth <= MAX_DEPTH)
			{
				pHistogram_temp[*pDepth]++;
				nNumberOfPoints++;
			}
		}
		pDepth += restOfRow;
	}
	if (nNumberOfPoints)
	{
		for (int nIndex=1; nIndex < histogramSize; nIndex++)
		{
			pHistogram_temp[nIndex] += pHistogram_temp[nIndex-1];
			pHistogram[nIndex] = (int)(256 * (1.0f - ((float)pHistogram_temp[nIndex] / nNumberOfPoints)));
		}
	}
	delete[] pHistogram_temp; // release the new[]-allocated scratch buffer
}
Example no. 5
void calculateHistogram(float* pHistogram, int histogramSize, const openni::VideoFrameRef& frame)
{
	const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();
	// Calculate the accumulative histogram (the yellow display...)
	memset(pHistogram, 0, histogramSize*sizeof(float));
	int restOfRow = frame.getStrideInBytes() / sizeof(openni::DepthPixel) - frame.getWidth();
	int height = frame.getHeight();
	int width = frame.getWidth();

	unsigned int nNumberOfPoints = 0;
	for (int y = 0; y < height; ++y)
	{
		for (int x = 0; x < width; ++x, ++pDepth)
		{
			if (*pDepth != 0)
			{
				pHistogram[*pDepth]++;
				nNumberOfPoints++;
			}
		}
		pDepth += restOfRow;
	}
	for (int nIndex=1; nIndex<histogramSize; nIndex++)
	{
		pHistogram[nIndex] += pHistogram[nIndex-1];
	}
	if (nNumberOfPoints)
	{
		for (int nIndex=1; nIndex<histogramSize; nIndex++)
		{
			pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
		}
	}
}
Example no. 6
void ColorStream::setPixels(openni::VideoFrameRef frame)
{
    Stream::setPixels(frame);

    openni::VideoMode m = frame.getVideoMode();

    int w = m.getResolutionX();
    int h = m.getResolutionY();
    int num_pixels = w * h;

    pix.allocate(w, h, 3);

    if (m.getPixelFormat() == openni::PIXEL_FORMAT_RGB888)
    {
        const unsigned char *src = (const unsigned char*)frame.getData();
        unsigned char *dst = pix.getBackBuffer().getPixels();

        for (int i = 0; i < num_pixels; i++)
        {
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
            src += 3;
            dst += 3;
        }
    }

    pix.swap();
}
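When the stride carries no padding (getStrideInBytes() == w * 3), the per-pixel copy above collapses to one bulk copy. A sketch of that fast path, meant for the RGB888 branch of the function above (requires <cstring>; the ofPixels-style double buffer is assumed from the snippet):

if (frame.getStrideInBytes() == w * 3)
{
    // Contiguous RGB888 rows: a single memcpy instead of a per-pixel loop.
    memcpy(pix.getBackBuffer().getPixels(), frame.getData(), num_pixels * 3);
}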
Example no. 7
void GeomDepthCalculator::SetDepthFrame(openni::VideoFrameRef& depthFrame)
{
	int w = depthFrame.getWidth();
	int h = depthFrame.getHeight();

	const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)depthFrame.getData();

	int rowSize = depthFrame.getStrideInBytes();
	rowSize /= sizeof(openni::DepthPixel);

	DepthFrame::FrameData<ushort>* frame = 0;
	if (m_frames.size() < m_maxFrames)
	{
		frame = new DepthFrame::FrameData<ushort>();
	}
	else
	{
		frame = *m_frames.begin();
		m_frames.erase(m_frames.begin());
	}
	frame->copyData(pDepthRow, w, h);
	m_frames.push_back(frame);

	m_avgFrame.createData(w,h);

	_averageFrames();

	m_frame->SetRawData(m_avgFrame.Data(), w, h);
	m_frame->CalculateDepth();
	if (m_calcNormals)
		m_frame->CalculateNormals();
	
}
Example no. 8
 void copyFrame(openni::VideoFrameRef& frame, MRPT_DATA& dst){
     const char*  data    = (const char*)frame.getData();
     const int    stride  = frame.getStrideInBytes();
     const int    width   = frame.getWidth();
     const int    height  = frame.getHeight();
     resize(dst, width, height);
     for (int y = 0; y < height; ++y, data+=stride){
         copyRow<NI_PIXEL, MRPT_DATA>(data, dst, width, y);
     }
 }
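The copyRow<> helper used above is not part of this snippet; a plausible sketch of its shape for the pixel-wise case (the setPixel call on MRPT_DATA is a hypothetical accessor, not a confirmed MRPT API):

template <typename NI_PIXEL, typename MRPT_DATA>
void copyRow(const char* data, MRPT_DATA& dst, const int width, const int y)
{
    // Reinterpret one stride-aligned row of the frame buffer as pixels.
    const NI_PIXEL* row = reinterpret_cast<const NI_PIXEL*>(data);
    for (int x = 0; x < width; ++x)
        dst.setPixel(x, y, row[x]);  // hypothetical per-pixel setter
}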
Example no. 9
 cv::Mat showDepthStream( const openni::VideoFrameRef& depthFrame )
 {
   // Convert the depth data to a 16-bit image
   cv::Mat depthImage = cv::Mat( depthFrame.getHeight(),
                                depthFrame.getWidth(),
                                CV_16UC1, (unsigned short*)depthFrame.getData() );
   
   // Scale the 0-10000 mm range down to 0-255 (8-bit)
   depthImage.convertTo( depthImage, CV_8U, 255.0 / 10000 );
   
   return depthImage;
 }
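A minimal capture-and-display loop around showDepthStream; the stream setup is omitted and the depthStream name is illustrative. Note that convertTo allocates a fresh buffer, so the returned Mat does not alias the frame's memory:

openni::VideoFrameRef depthFrame;
while (cv::waitKey(10) != 27)  // ESC quits
{
    depthStream.readFrame(&depthFrame);
    if (depthFrame.isValid())
        cv::imshow("Depth Stream", showDepthStream(depthFrame));
}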
Example no. 10
 // Convert the color stream into a displayable form
 cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
 {
   // Convert to an OpenCV Mat
   cv::Mat colorImage = cv::Mat( colorFrame.getHeight(),
                                colorFrame.getWidth(),
                                CV_8UC3, (unsigned char*)colorFrame.getData() );
   
   // Convert the RGB pixel order to OpenCV's BGR
   cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
   
   return colorImage;
 }
Example no. 11
void DepthStream::setPixels(openni::VideoFrameRef frame)
{
    Stream::setPixels(frame);

    const unsigned short *pixels = (const unsigned short*)frame.getData();
    int w = frame.getVideoMode().getResolutionX();
    int h = frame.getVideoMode().getResolutionY();
    int num_pixels = w * h;

    pix.allocate(w, h, 1);
    pix.getBackBuffer().setFromPixels(pixels, w, h, OF_IMAGE_GRAYSCALE);
    pix.swap();
}
Example no. 12
cv::Mat getColorImage(openni::VideoFrameRef& color_frame)
{
  if(!color_frame.isValid())
  {
    return cv::Mat();
  }
  openni::VideoMode video_mode = color_frame.getVideoMode();
  cv::Mat color_img = cv::Mat(video_mode.getResolutionY(),
                              video_mode.getResolutionX(),
                              CV_8UC3, (char*)color_frame.getData());
  cv::Mat ret_img;
  cv::cvtColor(color_img, ret_img, CV_RGB2BGR);
  return ret_img;
}
Example no. 13
// CV_16U
cv::Mat getDepthImage(openni::VideoFrameRef& depth_frame)
{
  if(!depth_frame.isValid())
  {
    return cv::Mat();
  }

  openni::VideoMode video_mode = depth_frame.getVideoMode();
  cv::Mat depth_img = cv::Mat(video_mode.getResolutionY(),
                              video_mode.getResolutionX(),
                              CV_16U, (char*)depth_frame.getData());

  return depth_img.clone();
}
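The cv::Mat constructor above only wraps the frame's internal buffer, so the clone() is what makes the result safe to keep after the VideoFrameRef is released. A caller sketch (the depth_stream name is illustrative):

openni::VideoFrameRef frame;
depth_stream.readFrame(&frame);        // depth_stream assumed created and started
cv::Mat depth = getDepthImage(frame);  // deep copy via clone()
frame.release();                       // safe: 'depth' owns its own pixels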
Example no. 14
void toggleStreamState(openni::VideoStream& stream, openni::VideoFrameRef& frame, bool& isOn, openni::SensorType type, const char* name)
{
    openni::Status nRetVal = openni::STATUS_OK;

    if (!stream.isValid())
    {
        nRetVal = stream.create(g_device, type);
        if (nRetVal != openni::STATUS_OK)
        {
            displayError("Failed to create %s stream:\n%s", name, openni::OpenNI::getExtendedError());
            return;
        }
    }

    if (isOn)
    {
        stream.stop();
        frame.release();
    }
    else
    {
        nRetVal = stream.start();
        if (nRetVal != openni::STATUS_OK)
        {
            displayError("Failed to start %s stream:\n%s", name, openni::OpenNI::getExtendedError());
            return;
        }
    }

    isOn = !isOn;
}
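A hypothetical call site, toggling the depth stream from a keyboard handler; the g_* globals mirror the ones implied by the snippet:

case 'd':  // inside a key-event switch
    toggleStreamState(g_depthStream, g_depthFrame, g_bIsDepthOn,
                      openni::SENSOR_DEPTH, "depth");
    break;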
Example no. 15
void SampleViewer::displayFrame(const openni::VideoFrameRef& frame)
{
	if (!frame.isValid())
		return;

	const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)frame.getData();
	openni::RGB888Pixel* pTexRow = m_pTexMap + frame.getCropOriginY() * m_nTexMapX;
	int rowSize = frame.getStrideInBytes() / sizeof(openni::DepthPixel);

	for (int y = 0; y < frame.getHeight(); ++y)
	{
		const openni::DepthPixel* pDepth = pDepthRow;
		openni::RGB888Pixel* pTex = pTexRow + frame.getCropOriginX();

		for (int x = 0; x < frame.getWidth(); ++x, ++pDepth, ++pTex)
		{
			if (*pDepth != 0)
			{
				int nHistValue = m_pDepthHist[*pDepth];
				pTex->r = nHistValue;
				pTex->g = nHistValue;
				pTex->b = nHistValue;
			}
		}

		pDepthRow += rowSize;
		pTexRow += m_nTexMapX;
	}

}
Example no. 16
void convert_pixel_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};

    cv::Mat s1, &s2 = out;
    s1 = cv::Mat(2, sizes, CV_8UC3, (void *)data);
    cv::cvtColor(s1,s2, CV_RGB2BGR);
	
	/*
	const nite::UserId* pLabels = map.getPixels();
	for (int y=0; y<map.getHeight(); ++y)
	{
		for (int x=0;x<map.getWidth(); ++x, ++pLabels)
		{
			cv::Vec3b &v = s2.at<cv::Vec3b>(cv::Point(x,y));
			if (*pLabels == 0)
				v = cv::Vec3b(0,0,0);
		}
	}
	*/
}
Example no. 17
  bool grabFrame(cv::Mat& color) {
    int changed_index;
    auto status = openni::OpenNI::waitForAnyStream(streams.data(), 1, &changed_index);
    if (status != openni::STATUS_OK)
      return false;

    color_stream.readFrame(&color_frame);
    if (!color_frame.isValid())
      return false;

    auto tgt = color.data;
    auto src = reinterpret_cast<const uint8_t*>(color_frame.getData());
    for (size_t i = 0; i < color.total(); ++i) {
      *tgt++ = *(src + 2);
      *tgt++ = *(src + 1);
      *tgt++ = *(src + 0);
      src += 3;
    }

    ++next_frame_index;
    return true;
  }
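grabFrame writes through color.data without allocating, so the caller must size the matrix to the stream's resolution beforehand. A sketch (the color_stream name follows the snippet above):

openni::VideoMode mode = color_stream.getVideoMode();
cv::Mat color(mode.getResolutionY(), mode.getResolutionX(), CV_8UC3);
if (grabFrame(color))
    cv::imshow("color", color);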
Example no. 18
void IrStream::setPixels(openni::VideoFrameRef frame)
{
    Stream::setPixels(frame);

    openni::VideoMode m = frame.getVideoMode();

    int w = m.getResolutionX();
    int h = m.getResolutionY();
    int num_pixels = w * h;

    pix.allocate(w, h, 1);

    if (m.getPixelFormat() == openni::PIXEL_FORMAT_GRAY8)
    {
        const unsigned char *src = (const unsigned char*)frame.getData();
        unsigned char *dst = pix.getBackBuffer().getPixels();

        for (int i = 0; i < num_pixels; i++)
        {
            dst[0] = src[0];
            src++;
            dst++;
        }
    }
    else if (m.getPixelFormat() == openni::PIXEL_FORMAT_GRAY16)
    {
        const unsigned short *src = (const unsigned short*)frame.getData();
        unsigned char *dst = pix.getBackBuffer().getPixels();

        for (int i = 0; i < num_pixels; i++)
        {
            dst[0] = src[0] >> 2; // assumes 10-bit IR data; drop two LSBs to fit 8 bits
            src++;
            dst++;
        }
    }

    pix.swap();
}
Example no. 19
            static void toCVTImage( Image& dst, const openni::VideoFrameRef& frame )
            {
                dst.reallocate( frame.getWidth(), frame.getHeight(), Openni2Helper::toIFormat( frame.getVideoMode().getPixelFormat() ) );

                switch( frame.getVideoMode().getPixelFormat() ){
                    case openni::PIXEL_FORMAT_RGB888:
                        copyRGB( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
                        break;
                    default:
                        copyData( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
                }
            }
Example no. 20
unsigned short DepthSensor::getSmallerDepth( const openni::VideoFrameRef& depthFrame ) {
    openni::VideoMode videoMode = depthStream.getVideoMode();

    int numPixels = videoMode.getResolutionX() * videoMode.getResolutionY();

    const unsigned short* depth = (const unsigned short*)depthFrame.getData();

    // Find the nearest non-zero reading; zero means "no measurement".
    unsigned short min = 10000;
    for (int i = 0; i < numPixels; i++) {
        if (depth[i] != 0 && depth[i] < min) {
            min = depth[i];
        }
    }
    return min;
}
Example no. 21
void setIRVideoMode(int mode)
{
	bool bIsStreamOn = g_bIsIROn;
	if (bIsStreamOn)
	{
		g_bIsIROn = false;
		g_irStream.stop();
	}

	g_irFrame.release();
	g_irStream.setVideoMode(g_irSensorInfo->getSupportedVideoModes()[mode]);
	if (bIsStreamOn)
	{
		g_irStream.start();
		g_bIsIROn = true;
	}
}
Example no. 22
void toggleIRState(int )
{
	if (g_irStream.isValid()) 
	{
		if(g_bIsIROn)
		{
			g_irStream.stop();
			g_irFrame.release();
		}
		else
		{
			openni::Status nRetVal = g_irStream.start();
			if (nRetVal != openni::STATUS_OK)
			{
				displayError("Failed to start IR stream:\n%s", openni::OpenNI::getExtendedError());
				return;
			}
		}

		g_bIsIROn = !g_bIsIROn;
	}
}
Example no. 23
void KinectHelper::updateColorFrame(openni::VideoFrameRef frame){
    DEBUG_QUEUE qDebug() << "queued color frame#" << frame.getFrameIndex();
    colorQueue.enqueue(frame);
}
Example no. 24
void Stream::setPixels(openni::VideoFrameRef frame)
{
    openni_timestamp = frame.getTimestamp();
}
Example no. 25
void SampleViewer::display()
{
	int changedIndex;
	openni::Status rc = openni::OpenNI::waitForAnyStream(m_streams, 2, &changedIndex);
	if (rc != openni::STATUS_OK)
	{
		printf("Wait failed\n");
		return;
	}

	switch (changedIndex)
	{
	case 0:
		m_depthStream.readFrame(&m_depthFrame); break;
	case 1:
		m_colorStream.readFrame(&m_colorFrame); break;
	default:
		printf("Error in wait\n");
	}

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -1.0, 1.0);

	if (m_depthFrame.isValid())
	{
		calculateHistogram(m_pDepthHist, MAX_DEPTH, m_depthFrame);
	}

	memset(m_pTexMap, 0, m_nTexMapX*m_nTexMapY*sizeof(openni::RGB888Pixel));

	// check if we need to draw image frame to texture
	if ((m_eViewState == DISPLAY_MODE_OVERLAY ||
		m_eViewState == DISPLAY_MODE_IMAGE) && m_colorFrame.isValid())
	{
		const openni::RGB888Pixel* pImageRow = (const openni::RGB888Pixel*)m_colorFrame.getData();
		openni::RGB888Pixel* pTexRow = m_pTexMap + m_colorFrame.getCropOriginY() * m_nTexMapX;
		int rowSize = m_colorFrame.getStrideInBytes() / sizeof(openni::RGB888Pixel);

		for (int y = 0; y < m_colorFrame.getHeight(); ++y)
		{
			const openni::RGB888Pixel* pImage = pImageRow;
			openni::RGB888Pixel* pTex = pTexRow + m_colorFrame.getCropOriginX();

			for (int x = 0; x < m_colorFrame.getWidth(); ++x, ++pImage, ++pTex)
			{
				*pTex = *pImage;
			}

			pImageRow += rowSize;
			pTexRow += m_nTexMapX;
		}
	}

	// check if we need to draw depth frame to texture
	if ((m_eViewState == DISPLAY_MODE_OVERLAY ||
		m_eViewState == DISPLAY_MODE_DEPTH) && m_depthFrame.isValid())
	{
		const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)m_depthFrame.getData();
		openni::RGB888Pixel* pTexRow = m_pTexMap + m_depthFrame.getCropOriginY() * m_nTexMapX;
		int rowSize = m_depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel);

		for (int y = 0; y < m_depthFrame.getHeight(); ++y)
		{
			const openni::DepthPixel* pDepth = pDepthRow;
			openni::RGB888Pixel* pTex = pTexRow + m_depthFrame.getCropOriginX();

			for (int x = 0; x < m_depthFrame.getWidth(); ++x, ++pDepth, ++pTex)
			{
				if (*pDepth != 0)
				{
					int nHistValue = m_pDepthHist[*pDepth];
					pTex->r = nHistValue;
					pTex->g = nHistValue;
					pTex->b = 0;
				}
			}

			pDepthRow += rowSize;
			pTexRow += m_nTexMapX;
		}
	}

	glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_nTexMapX, m_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, m_pTexMap);

	// Display the OpenGL texture map
	glColor4f(1,1,1,1);

	glBegin(GL_QUADS);

	int nXRes = m_width;
	int nYRes = m_height;

	// upper left
	glTexCoord2f(0, 0);
	glVertex2f(0, 0);
	// upper right
	glTexCoord2f((float)nXRes/(float)m_nTexMapX, 0);
	glVertex2f(GL_WIN_SIZE_X, 0);
	// bottom right
	glTexCoord2f((float)nXRes/(float)m_nTexMapX, (float)nYRes/(float)m_nTexMapY);
	glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	// bottom left
	glTexCoord2f(0, (float)nYRes/(float)m_nTexMapY);
	glVertex2f(0, GL_WIN_SIZE_Y);

	glEnd();

	// Swap the OpenGL display buffers
	glutSwapBuffers();

}
Example no. 26
void KinectCamera::paint(QPainter *painter)
{

    if (!fig) // if the device is not open yet, run startcamera() first
    {
        startcamera();
          if(m_streamsource=="depth")
           {
              int iMaxDepth = mDepthStream.getMaxPixelValue();
              mDepthStream.readFrame( &mDepthFrame );
              const cv::Mat mImageDepth(
                          mDepthFrame.getHeight(), mDepthFrame.getWidth(),
                          CV_16UC1, (void*)mDepthFrame.getData() );
              cv::Mat mScaledDepth;
              mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );
              QVector<QRgb>  colorTable;
              for(int k=0;k<256;++k)
              {
                  colorTable.push_back( qRgb(k,k,k) );
              }
              KinectDepthImage= QImage((const unsigned char*)mScaledDepth.data,mDepthFrame.getWidth(), mDepthFrame.getHeight(),QImage::Format_Indexed8);
              KinectDepthImage.setColorTable(colorTable);
              painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectDepthImage);
          }
          else
          {
              mColorStream.readFrame( &mColorFrame );
              KinectColorImage= QImage((const unsigned char*)mColorFrame.getData(),mColorFrame.getWidth(), mColorFrame.getHeight(),QImage::Format_RGB888);
              painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectColorImage);
          }
    }
    else // the device is already open; read frames directly
    {
        if(m_streamsource=="depth")
        {
            int iMaxDepth = mDepthStream.getMaxPixelValue();
            mDepthStream.readFrame( &mDepthFrame );
            const cv::Mat mImageDepth(
                        mDepthFrame.getHeight(), mDepthFrame.getWidth(),
                        CV_16UC1, (void*)mDepthFrame.getData() );
            cv::Mat mScaledDepth;
            mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );
            QVector<QRgb>  colorTable;
            for(int k=0;k<256;++k)
            {
                colorTable.push_back( qRgb(k,k,k) );
            }
            KinectDepthImage= QImage((const unsigned char*)mScaledDepth.data,mDepthFrame.getWidth(), mDepthFrame.getHeight(),QImage::Format_Indexed8);
            KinectDepthImage.setColorTable(colorTable);
            painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectDepthImage);
        }
        else
        {
            mColorStream.readFrame( &mColorFrame );
            KinectColorImage= QImage((const unsigned char*)mColorFrame.getData(),mColorFrame.getWidth(), mColorFrame.getHeight(),QImage::Format_RGB888);
            painter->drawImage(boundingRect().adjusted(1, 1, -1, -1),KinectColorImage);
        }

    }
}
Example no. 27
 // Convert the color stream into a displayable form
 cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
 {
   cv::Mat colorImage;
   
   // Color stream
   if ( colorFrame.getVideoMode().getPixelFormat() ==
       openni::PIXEL_FORMAT_RGB888 ) {
     // Convert to an OpenCV Mat
     colorImage = cv::Mat( colorFrame.getHeight(),
                          colorFrame.getWidth(),
                          CV_8UC3, (unsigned char*)colorFrame.getData() );
     
     // Convert the RGB pixel order to OpenCV's BGR
     cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
   }
   // Xtion IR stream
   else if ( colorFrame.getVideoMode().getPixelFormat() ==
            openni::PIXEL_FORMAT_GRAY16 ) {
     // The Xtion IR format is 16-bit grayscale, but in practice it uses only
     // about 255 levels, so it is invisible unless converted down to CV_8U
     colorImage = cv::Mat( colorFrame.getHeight(),
                          colorFrame.getWidth(),
                          CV_16UC1, (unsigned short*)colorFrame.getData() );
     colorImage.convertTo( colorImage, CV_8U );
   }
   // Kinect for Windows IR stream
   else {
     // The Kinect IR format is 8-bit grayscale
     // (the Kinect SDK itself uses 16-bit grayscale)
     colorImage = cv::Mat( colorFrame.getHeight(),
                          colorFrame.getWidth(),
                          CV_8UC1, (unsigned char*)colorFrame.getData() );
   }
   
   return colorImage;
 }
Example no. 28
void SampleViewer::Display()
{

	nite::Status rc = m_pHandTracker->readFrame(&handFrame);
	if (rc != nite::STATUS_OK)
	{
		printf("GetNextData failed\n");
		return;
	}

	depthFrame = handFrame.getDepthFrame();

	if (m_pTexMap == NULL)
	{
		// Texture map init
		m_nTexMapX = MIN_CHUNKS_SIZE(depthFrame.getVideoMode().getResolutionX(), TEXTURE_SIZE);
		m_nTexMapY = MIN_CHUNKS_SIZE(depthFrame.getVideoMode().getResolutionY(), TEXTURE_SIZE);
		m_pTexMap = new openni::RGB888Pixel[m_nTexMapX * m_nTexMapY];
	}

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	glEnable(GL_DEPTH_TEST);
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -10000.0, 10000.0);



	if (depthFrame.isValid())
	{
		calculateHistogram(m_pDepthHist, MAX_DEPTH, depthFrame);
	}

	memset(m_pTexMap, 0, m_nTexMapX*m_nTexMapY*sizeof(openni::RGB888Pixel));

	float factor[3] = {1, 1, 1};
	// check if we need to draw depth frame to texture
	
	float av_x = 0;
	float av_y = 0;
	int counter = 0;

	for (int i = 0; i <= 7; i++)
		note_on[i] = false;

	if (depthFrame.isValid() && g_drawDepth)
	{
		const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)depthFrame.getData();
		const openni::DepthPixel* pDepthRow1 = pDepthRow;
		openni::RGB888Pixel* pTexRow = m_pTexMap + depthFrame.getCropOriginY() * m_nTexMapX;
		int rowSize = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel);
		glPointSize(2);
		glBegin(GL_POINTS);

		for (int y = 0; y < depthFrame.getHeight(); ++y)
		{
			const openni::DepthPixel* pDepth = pDepthRow;
			openni::RGB888Pixel* pTex = pTexRow + depthFrame.getCropOriginX();
			//chord_temp = 0;
			for (int x = 0; x < depthFrame.getWidth(); ++x, ++pDepth, ++pTex)
			{
				if (*pDepth != 0)
				{
					factor[0] = Colors[colorCount][0];
					factor[1] = Colors[colorCount][1];
					factor[2] = Colors[colorCount][2];

					int nHistValue = m_pDepthHist[*pDepth];
					pTex->r = nHistValue*factor[0];
					pTex->g = nHistValue*factor[1];
					pTex->b = nHistValue*factor[2];

					factor[0] = factor[1] = factor[2] = 1;
					

					if(*pDepth <= 800)
					{
					    //glColor3f(1,0,0);	
						glColor3f(float(*pDepth)/2000,float(*pDepth)/2000,float(*pDepth)/2000);
						
						av_x = x + av_x;
						counter++;
						av_y = y + av_y;
		
							
					}
					else{
						glColor3f(float(*pDepth)/2000,float(*pDepth)/2000,float(*pDepth)/2000);	
		
					}

					glVertex3f(2*x,2*y,-*pDepth);
				}
			}
			pDepthRow += rowSize;
			pTexRow += m_nTexMapX;
		}
		glEnd();
		//////////////////////////////////////////////////////////////////////////////////////////////////////////////////

		if (counter > 0)
		{
			av_x = av_x / counter;
			av_y = av_y / counter;
		}

		float R_x=0;
		float R_y=0;
		float L_x=0;
		float L_y=0;
		int counter_R=0;
		int counter_L=0;

		/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////	

		for (int y = 0; y < depthFrame.getHeight(); ++y)
		{
			const openni::DepthPixel* pDepth = pDepthRow1;
			//chord_temp = 0;
			for (int x = 0; x < depthFrame.getWidth(); ++x, ++pDepth)
			{
				if (*pDepth != 0)
				{

					if(*pDepth <= 800)
					{

						if(x > av_x){
							counter_R++;
							R_x = R_x +x;
							R_y = R_y +y;
						}
						if(x < av_x){
							counter_L++;
							L_x = L_x +x;
							L_y = L_y +y;
						}

					}

				}
			}
			pDepthRow1 += rowSize;
		}
		/////////////////////////////////////////////////////////////////

		if (counter_R > 0)
		{
			R_x = R_x / counter_R;
			R_y = R_y / counter_R;
		}

		if (counter_L > 0)
		{
			L_x = L_x / counter_L;
			L_y = L_y / counter_L;
		}

		glPointSize(30);
	    glBegin(GL_POINTS);
		glColor3f(1,0,0);
		glVertex3f(R_x*2,R_y*2,800);
		glColor3f(1,1,0);
		glVertex3f(L_x*2,L_y*2,800);
		glEnd();


		// Map each hand position to one of the eight drum-pad regions.
		auto triggerNotes = [&](float hx, float hy)
		{
			if (hx >= 75 && hx <= 175) {
				if (hy <= 150)      note_on[0] = true;
				else if (hy >= 350) note_on[1] = true;
			}
			if (hx >= 175 && hx <= 300) {
				if (hy <= 150)      note_on[2] = true;
				else if (hy >= 350) note_on[3] = true;
			}
			if (hx >= 300 && hx <= 425) {
				if (hy <= 150)      note_on[4] = true;
				else if (hy >= 350) note_on[5] = true;
			}
			if (hx >= 425 && hx <= 550) {
				if (hy <= 150)      note_on[6] = true;
				else if (hy >= 350) note_on[7] = true;
			}
		};
		triggerNotes(R_x, R_y);
		triggerNotes(L_x, L_y);
	}

	playdrum();
	for(int i=0;i<=7 ;i++)
		last_note[i] = note_on[i]; 

	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); 
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); 
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); 
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); 

	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, BMPwidth1,BMPheight1, 0, GL_RGB, GL_UNSIGNED_BYTE, BMPimage1);

	glEnable(GL_BLEND);
	glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
	glColor4f(1,1,1,0.5);

	glEnable(GL_TEXTURE_2D);
	glBegin(GL_QUADS);

	g_nXRes = depthFrame.getVideoMode().getResolutionX();
	g_nYRes = depthFrame.getVideoMode().getResolutionY();

	// upper left
	glTexCoord2f(0,1);
	glVertex3f(0,0,-800);
	// upper right
	glTexCoord2f(1,1);
	glVertex3f(1240,0,-800);
	// bottom right
	glTexCoord2f(1,0);
	glVertex3f(1240,960,-800);
	// bottom left
	glTexCoord2f(0,0);
	glVertex3f(0,960,-800);

	glEnd();
	glDisable(GL_TEXTURE_2D);
	glDisable(GL_BLEND);  

	/////////////////////////////////////////////////////////////////////////////////chord selection

	glBegin(GL_LINES);
	glColor3f(1,0,0);
	glVertex3f(150,300,800);
	glVertex3f(1100,300,800);
	glVertex3f(150,700,800);
	glVertex3f(1100,700,800);
	glEnd();


	glPointSize(30);
	glBegin(GL_POINTS);
	glColor3f(1,1,0);
	glVertex3f(150,300,800);
	glVertex3f(350,300,800);
	glVertex3f(600,300,800);
	glVertex3f(850,300,800);
	glVertex3f(1100,300,800);


	glVertex3f(150,700,800);
	glVertex3f(350,700,800);
	glVertex3f(600,700,800);
	glVertex3f(850,700,800);
	glVertex3f(1100,700,800);
	glEnd();



	////////////////////////////////////////////////////////////////////////////////////////////////////////////
		

	const nite::Array<nite::GestureData>& gestures = handFrame.getGestures();
	for (int i = 0; i < gestures.getSize(); ++i)
	{
		if (gestures[i].isComplete())
		{
			const nite::Point3f& position = gestures[i].getCurrentPosition();
			printf("Gesture %d at (%f,%f,%f)\n", gestures[i].getType(), position.x, position.y, position.z);

			nite::HandId newId;
			m_pHandTracker->startHandTracking(gestures[i].getCurrentPosition(), &newId);
		}
	}

	const nite::Array<nite::HandData>& hands= handFrame.getHands();
	for (int i = 0; i < hands.getSize(); ++i)
	{
		const nite::HandData& user = hands[i];

		if (!user.isTracking())
		{
			printf("Lost hand %d\n", user.getId());
			nite::HandId id = user.getId();
			HistoryBuffer<20>* pHistory = g_histories[id];
			g_histories.erase(g_histories.find(id));
			delete pHistory;
		}
		else
		{
			if (user.isNew())
			{
				printf("Found hand %d\n", user.getId());
				g_histories[user.getId()] = new HistoryBuffer<20>;
			}
			// Add to history
			HistoryBuffer<20>* pHistory = g_histories[user.getId()];
			pHistory->AddPoint(user.getPosition());
			// Draw history
		    DrawHistory(m_pHandTracker, user.getId(), pHistory);
		   
		}
	}

	if (g_drawFrameId)
	{
		DrawFrameId(handFrame.getFrameIndex());
	}
	// Swap the OpenGL display buffers
	glutSwapBuffers();
	
}
Example no. 29
void KinectHelper::updateDepthFrame(openni::VideoFrameRef frame){
    /// Fetch new depth frame from the frame listener class
    DEBUG_QUEUE qDebug() << "queued depth frame#" << frame.getFrameIndex();
    depthQueue.enqueue(frame);
}
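The enqueue side above suggests a listener thread feeding a consumer elsewhere; a hedged sketch of the matching drain loop, assuming depthQueue follows the QQueue API implied by the Qt idioms in the snippet:

void KinectHelper::processQueuedDepthFrames()
{
    while (!depthQueue.isEmpty())
    {
        openni::VideoFrameRef frame = depthQueue.dequeue();
        // ... hand the frame to conversion / rendering here
    }
}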