Example #1
std::stringstream cloud_xyz_comp(VideoFrameRef frame, VideoStream &stream, bool showstats)
{
    std::stringstream comp_data;
    pcl::PointCloud<pcl::PointXYZ> cloud;
    if (frame.getData() == NULL)
    {
        std::cout << "Empty data\n";
        return comp_data;
    }
    float px, py, pz;
    Status status;
    cloud.width = frame.getWidth();
    cloud.height = frame.getHeight();
    cloud.resize(cloud.height * cloud.width);
    // Address each row through the stride (in bytes) so padded rows are
    // handled; the original advanced a pixel pointer while indexing the cloud
    // by stride/2, which diverges whenever rows carry padding.
    const char *base = (const char *)frame.getData();
    for (int y = 0; y < frame.getHeight(); y++)
    {
        const DepthPixel *row = (const DepthPixel *)(base + y * frame.getStrideInBytes());
        for (int x = 0; x < frame.getWidth(); x++)
        {
            status = CoordinateConverter::convertDepthToWorld(stream, x, y, row[x], &px, &py, &pz);
            if (!handlestatus(status)) exit(0);
            cloud.points[y * cloud.width + x].x = px;
            cloud.points[y * cloud.width + x].y = py;
            cloud.points[y * cloud.width + x].z = pz;
        }
    }
    pcl::PointCloud<pcl::PointXYZ>::ConstPtr const_cloud(new pcl::PointCloud<pcl::PointXYZ>(cloud));
    pcl::octree::compression_Profiles_e comp_profile = pcl::octree::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
    // A stack-allocated encoder avoids the leak of the never-deleted heap object.
    pcl::octree::PointCloudCompression<pcl::PointXYZ> encoder(comp_profile, showstats);
    encoder.encodePointCloud(const_cloud, comp_data);
    return comp_data;
}
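For reference, the stream produced above can be decoded back into a cloud with the matching decoder. A minimal sketch, mirroring the class and profile naming used in the example (recent PCL releases expose this class as pcl::io::OctreePointCloudCompression in pcl/compression/octree_pointcloud_compression.h):

pcl::PointCloud<pcl::PointXYZ>::Ptr decode_cloud_xyz(std::stringstream &comp_data)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    // The decoder picks the profile up from the header the encoder wrote
    // into the stream, so no profile needs to be passed here.
    pcl::octree::PointCloudCompression<pcl::PointXYZ> decoder;
    decoder.decodePointCloud(comp_data, cloud);
    return cloud;
}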
Example #2
void analyzeFrame(const VideoFrameRef& frame)
{
	DepthPixel* pDepth;
	RGB888Pixel* pColor;

	int middleIndex = (frame.getHeight()+1)*frame.getWidth()/2;

	switch (frame.getVideoMode().getPixelFormat())
	{
	case PIXEL_FORMAT_DEPTH_1_MM:
	case PIXEL_FORMAT_DEPTH_100_UM:
		pDepth = (DepthPixel*)frame.getData();
		printf("[%08llu] %8d\n", (long long)frame.getTimestamp(),
			pDepth[middleIndex]);
		break;
	case PIXEL_FORMAT_RGB888:
		pColor = (RGB888Pixel*)frame.getData();
		printf("[%08llu] 0x%02x%02x%02x\n", (long long)frame.getTimestamp(),
			pColor[middleIndex].r&0xff,
			pColor[middleIndex].g&0xff,
			pColor[middleIndex].b&0xff);
		break;
	default:
		printf("Unknown format\n");
	}
}
Example #3
pcl::PointCloud<pcl::PointXYZ> cloud_xyz(VideoFrameRef frame, VideoStream &stream)
{
    pcl::PointCloud<pcl::PointXYZ> cloud;
    if (frame.getData() == NULL)
    {
        std::cout << "Empty data\n";
        return cloud;
    }
    float px, py, pz;
    Status status;
    cloud.width = frame.getWidth();
    cloud.height = frame.getHeight();
    cloud.resize(cloud.height * cloud.width);
    // As in Example #1, address each row through the stride (in bytes) so
    // padded rows are handled correctly.
    const char *base = (const char *)frame.getData();
    for (int y = 0; y < frame.getHeight(); y++)
    {
        const DepthPixel *row = (const DepthPixel *)(base + y * frame.getStrideInBytes());
        for (int x = 0; x < frame.getWidth(); x++)
        {
            status = CoordinateConverter::convertDepthToWorld(stream, x, y, row[x], &px, &py, &pz);
            if (!handlestatus(status)) exit(0);
            cloud.points[y * cloud.width + x].x = px;
            cloud.points[y * cloud.width + x].y = py;
            cloud.points[y * cloud.width + x].z = pz;
        }
    }
    return cloud;
}
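Examples #1 and #3 call a handlestatus() helper that is not shown anywhere in this listing. A minimal sketch of what such a helper presumably does, assuming it only reports the OpenNI error text and lets the caller decide to abort:

bool handlestatus(Status status)
{
    // Report the extended OpenNI error and signal failure to the caller.
    if (status != STATUS_OK)
    {
        printf("OpenNI error:\n%s\n", OpenNI::getExtendedError());
        return false;
    }
    return true;
}

Example #4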
/**
 Reads a frame.

 @return If no frame can be read or the frame read is not valid returns false, otherwise returns true.
*/
bool Kinect::readFrame(cv::Mat &frame, CameraMode camMode)
{
	VideoFrameRef irf;
	int hIr, wIr;

	VideoFrameRef colorf; 
	int hColor, wColor;


	switch (camMode)
	{
		case (NI_SENSOR_DEPTH):

			rc = depth.readFrame(&irf);
			if (irf.isValid())
			{
				const uint16_t* imgBufIr = (const uint16_t*)irf.getData();
				hIr = irf.getHeight();
        		wIr = irf.getWidth();
        		frame.create(hIr, wIr, CV_16U);
        		memcpy(frame.data, imgBufIr, hIr * wIr * sizeof(uint16_t));
        		frame.convertTo(frame, CV_8U);
				return true;
			}
			else
			{
				cout << "ERROR: Frame not valid." << endl;
				return false;
			}

		case (NI_SENSOR_COLOR):

			rc = color.readFrame(&colorf);
			if(colorf.isValid())
			{
				const openni::RGB888Pixel* imgBufColor = (const openni::RGB888Pixel*)colorf.getData();
				hColor = colorf.getHeight();
        		wColor = colorf.getWidth();
        		frame.create(hColor, wColor, CV_8UC3);
        		memcpy(frame.data, imgBufColor,  3 * hColor * wColor * sizeof(uint8_t));
        		cvtColor(frame, frame, CV_BGR2RGB);
        		return true;
			}
			else
			{
				cout << "ERROR: Frame not valid." << endl;
				return false;
			}

		default:
			cout << "ERROR: No frame to be read. Object not initialize." << endl;
			return false;
	}
}
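For context, a call site for readFrame() might look like the following. This is only a sketch: the Kinect wrapper's construction, stream setup, and the NI_SENSOR_* enum come from the surrounding project.

Kinect kinect;            // assumed to be opened and started elsewhere
cv::Mat view;
if (kinect.readFrame(view, NI_SENSOR_DEPTH))
    cv::imshow("depth", view);

Example #5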
void XtionDepthDriverImpl::onNewFrame(VideoStream &stream)
{
  VideoFrameRef ref;
  stream.readFrame(&ref);
  _lastCaptured = XtionDepthImage(ref.getData(), ref.getDataSize(),
    ref.getWidth(), ref.getHeight(), 0, this);
}
Example #6
Status ClosestPoint::getNextData(IntPoint3D& closestPoint, VideoFrameRef& rawFrame)
{
	Status rc = m_pInternal->m_pDepthStream->readFrame(&rawFrame);
	if (rc != STATUS_OK)
	{
		printf("readFrame failed\n%s\n", OpenNI::getExtendedError());
		return rc;
	}

	DepthPixel* pDepth = (DepthPixel*)rawFrame.getData();
	bool found = false;
	closestPoint.Z = 0xffff;
	int width = rawFrame.getWidth();
	int height = rawFrame.getHeight();

	for (int y = 0; y < height; ++y)
		for (int x = 0; x < width; ++x, ++pDepth)
		{
			if (*pDepth < closestPoint.Z && *pDepth != 0)
			{
				closestPoint.X = x;
				closestPoint.Y = y;
				closestPoint.Z = *pDepth;
				found = true;
			}
		}

	if (!found)
	{
		return STATUS_ERROR;
	}

	return STATUS_OK;
}
Example #7
  virtual void onNewFrame(VideoStream& stream)
  {
    ros::Time ts = ros::Time::now();

    VideoFrameRef frame;
    stream.readFrame(&frame);

    sensor_msgs::Image::Ptr img(new sensor_msgs::Image);
    sensor_msgs::CameraInfo::Ptr info(new sensor_msgs::CameraInfo);

    double scale = double(frame.getWidth()) / double(1280);

    info->header.stamp = ts;
    info->header.frame_id = frame_id_;
    info->width = frame.getWidth();
    info->height = frame.getHeight();
    info->K.assign(0);
    info->K[0] = 1050.0 * scale;
    info->K[4] = 1050.0 * scale;
    info->K[2] = frame.getWidth() / 2.0 - 0.5;
    info->K[5] = frame.getHeight() / 2.0 - 0.5;
    info->P.assign(0);
    info->P[0] = 1050.0 * scale;
    info->P[5] = 1050.0 * scale;
    info->P[2] = frame.getWidth() / 2.0 - 0.5;
    info->P[6] = frame.getHeight() / 2.0 - 0.5;

    switch(frame.getVideoMode().getPixelFormat())
    {
    case PIXEL_FORMAT_GRAY8:
      img->encoding = sensor_msgs::image_encodings::MONO8;
      break;
    case PIXEL_FORMAT_GRAY16:
      img->encoding = sensor_msgs::image_encodings::MONO16;
      break;
    case PIXEL_FORMAT_YUV422:
      img->encoding = sensor_msgs::image_encodings::YUV422;
      break;
    case PIXEL_FORMAT_RGB888:
      img->encoding = sensor_msgs::image_encodings::RGB8;
      break;
    case PIXEL_FORMAT_SHIFT_9_2:
    case PIXEL_FORMAT_DEPTH_1_MM:
      img->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
      break;
    default:
      ROS_WARN("Unknown OpenNI pixel format!");
      break;
    }
    img->header.stamp = ts;
    img->header.frame_id = frame_id_;
    img->height = frame.getHeight();
    img->width = frame.getWidth();
    img->step = frame.getStrideInBytes();
    img->data.resize(frame.getDataSize());
    std::copy(static_cast<const uint8_t*>(frame.getData()), static_cast<const uint8_t*>(frame.getData()) + frame.getDataSize(), img->data.begin());

    publish(img, info);
  }
Example #8
void copyFrameToImage(VideoFrameRef frame, DepthImage& image)
{
	if (image.height() != frame.getHeight() || image.width() != frame.getWidth())
	{
		image = DepthImage(
			DepthImage::Data(
			static_cast<const uint16_t*>(frame.getData()),
			static_cast<const uint16_t*>(frame.getData()) + frame.getWidth() * frame.getHeight()),
			frame.getWidth(),
			frame.getHeight());
	}
	else
	{
		image.data(DepthImage::Data(
			static_cast<const uint16_t*>(frame.getData()),
			static_cast<const uint16_t*>(frame.getData()) + frame.getWidth() * frame.getHeight()));
	}
}
Example #9
void DepthCallback::analyzeFrame(const VideoFrameRef& frame)
{

    Mat image;
    image.create(frame.getHeight(),frame.getWidth(),CV_16UC1);
    const openni::DepthPixel* pImageRow = (const openni::DepthPixel*)frame.getData();
    memcpy(image.data,pImageRow,frame.getStrideInBytes() * frame.getHeight());
//    image *= 16;
    if (createCVWindow)
    {
        imshow( "DepthWindow", image );
        waitKey(10);
    }

//    cout<<"New depth frame w: "<<frame.getWidth()<<"  h: "<<frame.getHeight()<<endl;

    if (saveOneFrame || saveFrameSequence)
    {
        char buffer[50];
        sprintf(buffer,"depth%lld.png",frame.getTimestamp());
        imwrite(buffer,image);

        saveOneFrame = false;
        std::cout<<"DepthCallback :: saved file "<<buffer<<std::endl;
    }

    if (publishRosMessage)
    {
        cv_bridge::CvImage aBridgeImage;
        aBridgeImage.image = image;
        aBridgeImage.encoding = "mono16";
        cv::flip(aBridgeImage.image,aBridgeImage.image,1);
        sensor_msgs::ImagePtr rosImage = aBridgeImage.toImageMsg();
//        rosImage.get()->header.frame_id="/camera_depth_optical_frame";
        rosImage.get()->header.frame_id=string("/") + string (m_CameraNamespace)+string("_depth_optical_frame");
        rosImage.get()->encoding="16UC1";
        rosImage.get()->header.stamp = ros::Time::now();
        m_RosPublisher.publish(rosImage);

        sensor_msgs::CameraInfo camInfo;
        camInfo.width = frame.getWidth();
        camInfo.height = frame.getHeight();
        camInfo.distortion_model = "plumb_bob";
        camInfo.K = {{570.3422241210938, 0.0, 314.5, 0.0, 570.3422241210938, 235.5, 0.0, 0.0, 1.0}};
        camInfo.R = {{1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0}};
        camInfo.P = {{570.3422241210938, 0.0, 314.5, -21.387834254417157, 0.0, 570.3422241210938, 235.5, 0.0, 0.0, 0.0, 1.0, 0.0}};
        double D[5] = {0.0,0.0,0.0,0.0,0.0};
        camInfo.D.assign(&D[0], &D[0]+5);
        camInfo.header.frame_id = string("/") + string (m_CameraNamespace)+string("_depth_optical_frame");
        camInfo.header.stamp = rosImage.get()->header.stamp;
        m_RosCameraInfoPublisher.publish(camInfo);
    }

}
Example #10
void KinectHelper::consume_color(VideoFrameRef frame){
    /// Fetch new color frame from the frame listener class

    /// Get color data from the frame just fetched
    const openni::RGB888Pixel* imageBuffer = (const openni::RGB888Pixel*) frame.getData();

    // Deep-copy the pixels: this QImage constructor only wraps the OpenNI
    // buffer, which is released together with the frame; passing the stride
    // keeps padded rows aligned.
    (*color_back_buffer) = QImage((uchar*) imageBuffer, frame.getWidth(), frame.getHeight(),
                                  frame.getStrideInBytes(), QImage::Format_RGB888).copy();

    _mutex.lock();
    qSwap(color_front_buffer, color_back_buffer);
    _mutex.unlock();
}
Example #11
const short unsigned int *wb_kinect_get_range_image_mm(WbDeviceTag tag) {
  if(kinectenable) {
    int i;
    int changedStreamDummy;
    VideoStream* pStream = &depth;

    rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);

    if (rc != STATUS_OK)
    {
      printf("Wait failed! (timeout is %d ms)\n%s\n", SAMPLE_READ_WAIT_TIMEOUT, OpenNI::getExtendedError());
      return NULL;

    }

    rc = depth.readFrame(&frame);

    if (rc != STATUS_OK)
    {
      printf("Read failed!\n%s\n", OpenNI::getExtendedError());
      return NULL;

    }

    if (frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_1_MM && frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_100_UM)
    {
      printf("Unexpected frame format\n");
      return NULL;
    }

    DepthPixel* pDepth = (DepthPixel*)frame.getData();
    height_pixels = frame.getHeight();
    width_pixels = frame.getWidth();

    if (width_pixels>160){
    //Fix blind column
    	for(i=0;i<height_pixels;i++){
          pDepth[i*width_pixels + 0] = pDepth[i*width_pixels + 3]; 
          pDepth[i*width_pixels + 1] = pDepth[i*width_pixels + 3];  
          pDepth[i*width_pixels + 2] = pDepth[i*width_pixels + 3];  
    	}
    } else {
        for(i=0;i<height_pixels;i++){
          pDepth[i*width_pixels + 0] = pDepth[i*width_pixels + 1];
        }
    }

    return pDepth;
  }
  // kinect not enabled
  fprintf(stderr, "Please enable the Kinect before using wb_kinect_get_range_image_mm()\n");
  return NULL;
}
Example #12
//	save depth images
void saveDepthImage(const std::string name, const VideoFrameRef &depthFrame)
{
	int height = depthFrame.getHeight();
	int width = depthFrame.getWidth();
	cv::Mat image = cv::Mat::zeros(height, width, CV_16UC1);
	DepthPixel *pDepth = (DepthPixel *)depthFrame.getData();
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			image.at<uint16_t>(i, j) = (uint16_t)(*pDepth);
			pDepth++;
		}
	}
	cv::imwrite(name, image);
}
Example #13
BBox3 KinectHelper::compute_frame_bbox(VideoFrameRef frame){
    // qDebug() << "KinectThread::setBoundingBox(VideoFrameRef frame)";

    /// Get depth data from the frame just fetched
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();
    float wx,wy,wz;
    BBox3 box; box.setNull();
    for (int y = 0; y<frame.getHeight(); ++y){
        for(int x = 0; x<frame.getWidth(); ++x){
            DepthPixel d = *pDepth;
            pDepth++;
            CoordinateConverter::convertDepthToWorld(depth, x, y, d, &wx, &wy, &wz);
            box.extend( Vector3(wx,wy,wz) );
        }
    }
    this->_bbox = box;
    return box;
}
Example #14
bool OpenNI2CameraImpl::copyInfrared( OpenNI2Camera::FrameView& frame )
{
    frame.infraredUpdated = false;

    VideoFrameRef src;
    Status rc = m_infraredStream.readFrame( &src );
    if( rc == STATUS_OK )
    {
        Array2DReadView< uint16_t > srcData( src.getData(),
            { src.getWidth(), src.getHeight() },
            { sizeof( uint16_t ), src.getStrideInBytes() } );
        frame.infraredTimestampNS =
            usToNS( static_cast< int64_t >( src.getTimestamp() ) );
        frame.infraredFrameNumber = src.getFrameIndex();
        frame.infraredUpdated = copy( srcData, frame.infrared );
        return frame.infraredUpdated;
    }
    return false;
}
Example #15
File: libnui.cpp (project: ankitkv/NUI)
Status NUIPoints::getNextData(std::list<cv::Point3f>& nuiPoints, VideoFrameRef& rawFrame)
{
	Status rc = m_pInternal->m_pDepthStream->readFrame(&rawFrame);
	if (rc != STATUS_OK)
	{
		printf("readFrame failed\n%s\n", OpenNI::getExtendedError());
		return rc;
	}

	DepthPixel* pDepth = (DepthPixel*)rawFrame.getData();
	bool found = false;
	cv::Point3f nuiPoint;
	int width = rawFrame.getWidth();
	int height = rawFrame.getHeight();

	for (int x = 1; x < width / 6.0; ++x) {
		for (int y = 1; y < height - (height / 4.5); ++y)
		{
			if (pDepth[y * width + x] < FAR_LIMIT
				&& pDepth[y * width + x] != 0
				&& (ZERO_TO_INFINITY(pDepth[(y + 1) * width + (x + 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD
				|| ZERO_TO_INFINITY(pDepth[(y - 1) * width + (x + 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD
				|| ZERO_TO_INFINITY(pDepth[(y + 1) * width + (x - 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD
				|| ZERO_TO_INFINITY(pDepth[(y - 1) * width + (x - 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD))
			{
				nuiPoint.x = x;
				nuiPoint.y = y;
				nuiPoint.z = pDepth[y * width + x];
				nuiPoints.push_back(nuiPoint);
			}
		}
	}

	if (nuiPoints.empty())
	{
		return STATUS_ERROR;
	}

	return STATUS_OK;
}
Example #16
void KinectHelper::consume_depth(VideoFrameRef frame){
    /// Get depth data from the frame just fetched
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();

    float wx,wy,wz;
    
    /// NaN values must be handled well and not displayed by OpenGL
    for (int y = 0; y<frame.getHeight(); ++y){
        for(int x = 0; x<frame.getWidth(); ++x){
            DepthPixel d = *pDepth;
            pDepth++;
            /// Convert depth coordinates to world coordinates to remove camera intrinsics
            CoordinateConverter::convertDepthToWorld(depth, x, y, d, &wx, &wy, &wz);
            /// Change the coordinates of the vertices in the model
            (*points_back_buffer)(y,x) = Vector3(wx,wy,wz);
        }
    }
    
    /// Now swap front and back buffers
    _mutex.lock();
        qSwap(points_front_buffer, points_back_buffer);
    _mutex.unlock();    
}
Example #17
int
main (int argc, char** argv)
{
    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        std::cout << "Initialize failed: " << OpenNI::getExtendedError() << std::endl;
        return 1;
    }

    Device device;
    rc = device.open(ANY_DEVICE);
    if (rc != STATUS_OK)
    {
        std::cout << "Couldn't open device: " << OpenNI::getExtendedError() << std::endl;
        return 2;
    }

    VideoStream stream;
    if (device.getSensorInfo(currentSensor) != NULL)
    {
        rc = stream.create(device, currentSensor);
        if (rc != STATUS_OK)
        {
            std::cout << "Couldn't create stream: " << OpenNI::getExtendedError() << std::endl;
            return 3;
        }
    }

    rc = stream.start();
    if (rc != STATUS_OK)
    {
        std::cout << "Couldn't start the stream: " << OpenNI::getExtendedError() << std::endl;
        return 4;
    }

    VideoFrameRef frame;

    //now open the video writer
    Size S = Size(stream.getVideoMode().getResolutionX(),
                  stream.getVideoMode().getResolutionY());
    VideoWriter outputVideo;
    std::string fileName = "out.avi";
    outputVideo.open(fileName, -1, stream.getVideoMode().getFps(), S, currentSensor == SENSOR_COLOR);
    if (!outputVideo.isOpened())
    {
        std::cout  << "Could not open the output video for write: " << fileName << std::endl;
        return -1;
    }

    while (waitKey(50) == -1)
    {
        int changedStreamDummy;
        VideoStream* pStream = &stream;
        rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
        if (rc != STATUS_OK)
        {
            std::cout << "Wait failed! (timeout is " << SAMPLE_READ_WAIT_TIMEOUT << "ms): " << OpenNI::getExtendedError() << std::endl;
            continue;
        }

        rc = stream.readFrame(&frame);
        if (rc != STATUS_OK)
        {
            std::cout << "Read failed:" << OpenNI::getExtendedError() << std::endl;
            continue;
        }

        Mat image;
        switch (currentSensor)
        {
        case SENSOR_COLOR:
            image = Mat(frame.getHeight(), frame.getWidth(), CV_8UC3, (void*)frame.getData());
            break;
        case SENSOR_DEPTH:
            image = Mat(frame.getHeight(), frame.getWidth(), DataType<DepthPixel>::type, (void*)frame.getData());
            break;
        case SENSOR_IR:
            image = Mat(frame.getHeight(), frame.getWidth(), CV_8U, (void*)frame.getData());
            break;
        default:
            break;
        }

        namedWindow( "Display window", WINDOW_AUTOSIZE );    // Create a window for display.
        imshow( "Display window", image );                   // Show our image inside it.

        outputVideo << image;
    }

    stream.stop();
    stream.destroy();
    device.close();
    OpenNI::shutdown();

    return 0;
}
Example #18
int main()
{
	// 2. initialize OpenNI
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}

	// 3. open a device
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	// 4. create depth stream
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) != NULL){
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK){
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
			return 3;
		}
	}
	VideoStream color;
	if (device.getSensorInfo(SENSOR_COLOR) != NULL){
		rc = color.create(device, SENSOR_COLOR);
		if (rc != STATUS_OK){
			printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
			return 4;
		}
	}

	// 5. create OpenCV Window
	cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);

	// 6. start
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 5;
	}
	rc = color.start();
	if (rc != STATUS_OK){
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 6;
	}
	
	VideoFrameRef colorframe;
	VideoFrameRef depthframe;
	int iMaxDepth = depth.getMaxPixelValue();
	int iColorFps = color.getVideoMode().getFps();
	cv::Size iColorFrameSize = cv::Size(color.getVideoMode().getResolutionX(), color.getVideoMode().getResolutionY());

	cv::Mat colorimageRGB;
	cv::Mat colorimageBGR;
	cv::Mat depthimage;
	cv::Mat depthimageScaled;

#ifdef F_RECORDVIDEO

	cv::VideoWriter outputvideo_color;
	cv::FileStorage outputfile_depth;
	
	time_t timenow = time(0);
	tm ltime;
	localtime_s(&ltime, &timenow);
	int tyear = 1900 + ltime.tm_year;
	int tmonth = 1 + ltime.tm_mon;
	int tday = ltime.tm_mday;
	int thour = ltime.tm_hour;
	int tmin = ltime.tm_min;
	int tsecond = ltime.tm_sec;

	string filename_rgb = "RGB/rgb_" + to_string(tyear) + "_" + to_string(tmonth) + "_" + to_string(tday)
		+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".avi";
	string filename_d = "D/d_" + to_string(tyear) + "_" + to_string(tmonth) + "_" + to_string(tday)
		+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".yml";

	outputvideo_color.open(filename_rgb, CV_FOURCC('I', '4', '2', '0'), iColorFps, iColorFrameSize, true);
	if (!outputvideo_color.isOpened()){
		cout << "Could not open the output color video for write: " << endl;
		return 7;
	}
	outputfile_depth.open(filename_d, cv::FileStorage::WRITE);
	if (!outputfile_depth.isOpened()){
		cout << "Could not open the output depth file for write: " << endl;
		return 8;
	}

#endif // F_RECORDVIDEO


	// 7. main loop: keep reading frames
	while (!wasKeyboardHit())
	{
		// 8. check if the color stream is available
		if (color.isValid()){
			if (color.readFrame(&colorframe) == STATUS_OK){
				colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
				cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
			}
		}

		// 9. check if the depth stream is available
		if (depth.isValid()){
			if (depth.readFrame(&depthframe) == STATUS_OK){
				depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
				depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
			}
		}

		cv::imshow("Color Image", colorimageBGR);
		cv::imshow("Depth Image", depthimageScaled);

#ifdef F_RECORDVIDEO
		
		outputvideo_color << colorimageBGR;
		outputfile_depth << "Mat" << depthimage;

#endif // F_RECORDVIDEO

		cv::waitKey(10);
	}

	color.stop();
	depth.stop();

	color.destroy();
	depth.destroy();

	device.close();
	OpenNI::shutdown();

	return 0;
}
Example #19
void KinectHelper::consume_depth(VideoFrameRef frame){
    /// Get depth data from the frame just fetched
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData(); // for point cloud
    const openni::DepthPixel* iDepth = (const openni::DepthPixel*)frame.getData(); // for depth image

    /// Depth Image
    (*depth_back_buffer) = QImage(frame.getWidth(), frame.getHeight(), QImage::Format_RGB32);

    for (unsigned nY=0; nY<frame.getHeight(); nY++)
    {
        uchar *imageptr = (*depth_back_buffer).scanLine(nY);
        for (unsigned nX=0; nX < frame.getWidth(); nX++)
        {
            unsigned depth = *iDepth;
            unsigned maxdist=10000;
            if(depth>maxdist) depth=maxdist;
            if(depth)
            {
                depth = (depth*255)/maxdist;
                //depth = (maxdist-depth)*255/maxdist+1;
            }
            // depth == 0: invalid
            // depth == 255: closest
            // depth == 1: furthest (maxdist away)
            // Here we could do depth*color, to show the colored depth
            imageptr[0] = depth;
            imageptr[1] = depth;
            imageptr[2] = depth;
            imageptr[3] = 0xff;
            iDepth++;
            imageptr+=4;
        }
    }


    _mutex.lock();
    qSwap(depth_front_buffer, depth_back_buffer);
    _mutex.unlock();


    /// Point Cloud

    float wx,wy,wz;

    /// NaN values must be handled well and not displayed by OpenGL
    for (int y = 0; y<frame.getHeight(); ++y){
        for(int x = 0; x<frame.getWidth(); ++x){
            DepthPixel d = *pDepth;
            pDepth++;
            /// Convert depth coordinates to world coordinates to remove camera intrinsics
            CoordinateConverter::convertDepthToWorld(depth, x, y, d, &wx, &wy, &wz);
            /// Change the coordinates of the vertices in the model
            (*points_back_buffer)(y,x) = Vector3(wx,wy,wz);
        }
    }

    /// Now swap front and back buffers
    _mutex.lock();
        qSwap(points_front_buffer, points_back_buffer);
    _mutex.unlock();
}
Example #20
void* camera_thread(void*){
  VideoFrameRef frame;
  VideoFrameRef rgbframe;
  int changed_stream;
  VideoStream* pStream;
  VideoStream* pRgbStream;
  sensor_msgs::CameraInfo rgb_info;
  sensor_msgs::CameraInfo depth_info;
  sensor_msgs::Image image;
  image.header.frame_id=frame_id;
  image.is_bigendian=0;

  int count = 0;
  int num_frames_stat=64;
  struct timeval previous_time;
  gettimeofday(&previous_time, 0);
  uint64_t last_stamp = 0;
  while(run){
    bool new_frame = false;
    uint64_t current_stamp = 0;
    openni::OpenNI::waitForAnyStream(streams, 2, &changed_stream);
    switch (changed_stream)
      {
	//DEPTH
      case 0:
	depth.readFrame(&frame);
	depth_info.header.stamp=ros::Time::now();
	image.header.stamp=ros::Time::now();
	if(!frame.isValid()) break;
	current_stamp=frame.getTimestamp();

	if (current_stamp-last_stamp>5000){
	  count++;
	  new_frame = true;
	}
	last_stamp = current_stamp;

	if (! (count % (_frame_skip)) ){
	  image.header.seq = count;
	  if (! _registration){
	    image.header.frame_id=frame_id+"_depth";
	  } else 
	    image.header.frame_id=frame_id+"_rgb";

	  depth_info.width=frame.getWidth();
	  depth_info.height=frame.getHeight();

	  depth_info.header.seq = count;

    depth_info.distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
    depth_info.D.resize(5, 0.0);

	  depth_info.K[0]=depth_info.width/(2*tan(depth.getHorizontalFieldOfView()/2)); //fx
	  depth_info.K[4]=depth_info.height/(2*tan(depth.getVerticalFieldOfView()/2)); //fy
	  depth_info.K[2]=depth_info.width/2; //cx
	  depth_info.K[5]=depth_info.height/2; //cy
	  depth_info.K[8]=1;

	  depth_info.header.frame_id=image.header.frame_id;
	  image.height=frame.getHeight();
	  image.width=frame.getWidth();
	  image.encoding="mono16";
	  image.step=frame.getWidth()*2;
	  image.data.resize(image.step*image.height);
	  memcpy((char*)(&image.data[0]),frame.getData(),image.height*image.width*2);
	  pub_depth.publish(image);
	  pub_camera_info_depth.publish(depth_info);
	}
	break;
	//RGB
      case 1:
	rgb.readFrame(&rgbframe);
	image.header.stamp=ros::Time::now();
	rgb_info.header.stamp=ros::Time::now();
	if(!rgbframe.isValid()) break;
	current_stamp=rgbframe.getTimestamp();

	if (current_stamp-last_stamp>5000){
	  count++;
	  new_frame = true;
	}
	last_stamp = current_stamp;

	if (_gain>=0) {
	  if (count > 64 && gain_status==0){
	    rgb.getCameraSettings()->setAutoExposureEnabled(false);
	    rgb.getCameraSettings()->setAutoWhiteBalanceEnabled(false);
	    rgb.getCameraSettings()->setExposure(1);
	    rgb.getCameraSettings()->setGain(100);
	    gain_status=1;
	  } else if (count > 128 && gain_status==1){
	    rgb.getCameraSettings()->setExposure(_exposure);
	    rgb.getCameraSettings()->setGain(_gain);
	    gain_status=2;
	  }
	}

	if (! (count%(_frame_skip)) ){
	  rgb_info.header.frame_id=frame_id+"_rgb";
	  rgb_info.width=rgbframe.getWidth();
	  rgb_info.height=rgbframe.getHeight();
	  rgb_info.header.seq = count;
	  rgb_info.K[0]=rgb_info.width/(2*tan(rgb.getHorizontalFieldOfView()/2)); //fx
	  rgb_info.K[4]=rgb_info.height/(2*tan(rgb.getVerticalFieldOfView()/2)); //fy
	  rgb_info.K[2]=rgb_info.width/2; //cx
	  rgb_info.K[5]=rgb_info.height/2; //cy
	  rgb_info.K[8]=1;
	  
	  image.header.seq = count;
	  image.header.frame_id=frame_id+"_rgb";
	  image.height=rgbframe.getHeight();
	  image.width=rgbframe.getWidth();
	  image.encoding="rgb8";
	  image.step=rgbframe.getWidth()*3;
	  image.data.resize(image.step*image.height);
	  memcpy((char*)(&image.data[0]),rgbframe.getData(),image.height*image.width*3);
	  pub_rgb.publish(image);
	  pub_camera_info_rgb.publish(rgb_info);
	}
	break;
      default:
	printf("Error in wait\n");
      }


    if (!(count%num_frames_stat) && new_frame){
      struct timeval current_time, interval;
      gettimeofday(&current_time, 0);
      timersub(&current_time, &previous_time, &interval);
      previous_time = current_time;
      double fps = num_frames_stat/(interval.tv_sec +1e-6*interval.tv_usec);
      printf("running at %lf fps\n", fps);
      fflush(stdout);
    }
  }
  return 0;
}
Example #21
int main()
{
	// 2. initialize OpenNI
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}

	// 3. open a device
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	// 4. create depth stream
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) != NULL){
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK){
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
			return 3;
		}
	}
	VideoStream color;
	if (device.getSensorInfo(SENSOR_COLOR) != NULL){
		rc = color.create(device, SENSOR_COLOR);
		if (rc != STATUS_OK){
			printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
			return 4;
		}
	}

	// 5. create OpenCV Window
	cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);

	// 6. start
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 5;
	}
	rc = color.start();
	if (rc != STATUS_OK){
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 6;
	}
	VideoFrameRef colorframe;
	VideoFrameRef depthframe;
	int iMaxDepth = depth.getMaxPixelValue();

	cv::Mat colorimageRGB;
	cv::Mat colorimageBGR;
	cv::Mat depthimage;
	cv::Mat depthimageScaled;

	// 7. main loop: keep reading frames
	while (!wasKeyboardHit())
	{
		// 8. check if the color stream is available
		if (color.isValid()){
			if (color.readFrame(&colorframe) == STATUS_OK){
				colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
				cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
			}
		}

		// 9. check if the depth stream is available
		if (depth.isValid()){
			if (depth.readFrame(&depthframe) == STATUS_OK){
				depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
				depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
			}
		}

		cv::imshow("Color Image", colorimageBGR);
		cv::imshow("Depth Image", depthimageScaled);
		cv::waitKey(10);
	}

	color.stop();
	depth.stop();

	color.destroy();
	depth.destroy();

	device.close();
	OpenNI::shutdown();

	return 0;
}
Example #22
		void ONIKinectDevice::onNewDepthFrame(VideoFrameRef frame)
		{
			//printf("[%08llu] Depth Frame\n", (long long)frame.getTimestamp());

			RGBDFramePtr rgbdFrame;
			//Make sure frame is in right format
			if(frame.getVideoMode().getPixelFormat() == PIXEL_FORMAT_DEPTH_1_MM || 
				frame.getVideoMode().getPixelFormat() == PIXEL_FORMAT_DEPTH_100_UM )
			{
				int width = frame.getVideoMode().getResolutionX();
				int height = frame.getVideoMode().getResolutionY();

				//Initialize frame if not initialized
				if(rgbdFrame == NULL)
				{
					rgbdFrame = mFrameFactory.getRGBDFrame(width,height);
				}

				if(width == rgbdFrame->getXRes()  && height == rgbdFrame->getYRes())
				{
					//Data array valid. Fill it
					//TODO: Use a more efficient method of transferring memory (memcpy, or plain linear indexing?)
					DPixelArray data = rgbdFrame->getDepthArray();
					//TODO: Enable cropping

					const openni::DepthPixel* pDepthRowONI = (const openni::DepthPixel*)frame.getData();
					DPixel* pDepthRow = data.get();
					for(int y = 0; y < height; y++)
					{
						DPixel* pDepth = pDepthRow;
						const openni::DepthPixel* pDepthONI = pDepthRowONI;

						for(int x = 0; x < width; ++x, ++pDepth, ++pDepthONI)
						{
							pDepth->depth = *pDepthONI;
						}
						pDepthRow += width;
						pDepthRowONI += width;
					}


					rgbdFrame->setDepthTimestamp(frame.getTimestamp());
					rgbdFrame->setHasDepth(true);

					//Check if send
					if(!mSyncDepthAndColor)
					{
						//Send it
						onNewRGBDFrame(rgbdFrame);
						rgbdFrame = NULL;
					}else{
						//Sync it
						frameGuard.lock();
						if(mRGBDFrameSynced == NULL)
						{
							//FIRST POST!!!
							mRGBDFrameSynced = rgbdFrame;
						}else{
							//SECOND!!
							//Send it
							mRGBDFrameSynced->setDepthArray(rgbdFrame->getDepthArray());
							mRGBDFrameSynced->setHasDepth(true);
							mRGBDFrameSynced->setDepthTimestamp(frame.getTimestamp());
							onNewRGBDFrame(mRGBDFrameSynced);
							mRGBDFrameSynced = NULL;
						}
						//Unlock scoped
						frameGuard.unlock();
					}

				}else{
					//Size error
					onMessage("Error: depth and color frames don't match in size\n");	
				}

			}else{
				//Format error
				onMessage("Error: depth format incorrect\n");	
			}
		}
Example #23
int main()
{
	FILE *fptrI = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv","w");
	fprintf(fptrI, "Classtype, Area, Perimeter, Circularity, Extent\n");
	fclose(fptrI);

	Mat input = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\Fingers.bmp", 1);
	Mat input2 = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\NotFingers.bmp", 1);
	Mat inputF = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\ImageFeaturesBinaryF.bmp", 1);
	Mat gray(input.rows, input.cols, CV_8UC1);
	Mat gray2(input.rows, input.cols, CV_8UC1);
	Mat grayF(input.rows, input.cols, CV_8UC1);
	cvtColor(input, gray, CV_BGR2GRAY);
	cvtColor(input2, gray2, CV_BGR2GRAY);
	cvtColor(inputF, grayF, CV_BGR2GRAY);
	shapeFeatures(gray, input, 1);
	shapeFeatures(gray2, input2, 2);
	namedWindow("Image");
	imshow("Image", input);
	namedWindow("Image2");
	imshow("Image2", input2);

	//------------------------------------------------------
	//--------[SVM]--------
	// Read input data from file created above
	double parameters[5];
	vector<double> svmI, svmA, svmP, svmC, svmE;
	int size = 1;
	double index = 0; double area = 0; double perimeter = 0; double circularity = 0;
	char buffer[1024];
	char *record, *line;
	FILE* fptrR = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "r");
	fscanf(fptrR, "%*[^\n]\n"); // skip the CSV header line

	svmI.resize(size); svmA.resize(size); svmP.resize(size); svmC.resize(size); svmE.resize(size);

	while((line=fgets(buffer, sizeof(buffer), fptrR))!=NULL)
	{
		size++;
		svmI.resize(size);
		svmA.resize(size);
		svmP.resize(size);
		svmC.resize(size);
		svmE.resize(size);

		record = strtok(line, ";");
		for(int i = 0; i < 5; i++)
		{
			double value = atoi(record);
			record = strtok(line,";");
		}
		char *lineCopy = record;
		char *pch;

		pch = strtok(lineCopy, ",");
		parameters[0] = atoi(pch);
		
		int j = 1;
		while( j < 5 )
		{
			pch = strtok (NULL, ",");
			parameters[j] = atof(pch);
			j++;
		}
		svmI[size-1] = parameters[0];
		svmA[size-1] = parameters[1];
		svmP[size-1] = parameters[2];
		svmC[size-1] = parameters[3];
		svmE[size-1] = parameters[4];
	}
	fclose(fptrR);
	//---------------------
	// Data for visual representation
    int width = 512, height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);

    // Set up training data
    //float labels[8] = {1.0, -1.0, -1.0, -1.0};
	float labels[1000];
	for(int i = 0; i < svmI.size()-1; i++)
	{
		labels[i] = svmI[i+1];
	}
    Mat labelsMat((int)svmI.size() - 1, 1, CV_32FC1, labels); // only the rows actually filled

    float trainingData[1000][4];
	for(int i = 0; i < svmE.size()-1; i++)
	{
		trainingData[i][0] = svmE[i+1];
		trainingData[i][1] = svmC[i+1];
		trainingData[i][2] = svmA[i+1];
		trainingData[i][3] = svmP[i+1];
	}
    Mat trainingDataMat((int)svmE.size() - 1, 4, CV_32FC1, trainingData); // only the rows actually filled

    // Set up SVM's parameters
    CvSVMParams params;
	params = SVMFinger.get_params();
    //params.svm_type    = CvSVM::C_SVC;
    //params.kernel_type = CvSVM::LINEAR;
    //params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    // Train the SVM
    SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);

//	Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
//	float response = SVMFinger.predict(sampleMat);

	waitKey();
	destroyWindow("Image");
	destroyWindow("Image2");

	//------------------------------------------
	OpenNI::initialize();

	Device devAnyDevice;
    devAnyDevice.open(ANY_DEVICE);

	//----------------[Define Video Settings]-------------------
	//Set Properties of Depth Stream
	VideoMode mModeDepth;
	mModeDepth.setResolution( 640, 480 );
	mModeDepth.setFps( 30 );
	mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );

	//Set Properties of Color Stream
	VideoMode mModeColor;
    mModeColor.setResolution( 640, 480 );
    mModeColor.setFps( 30 );
    mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
	//----------------------------------------------------------
	//----------------------[Initial Streams]---------------------
	VideoStream streamInitDepth;
    streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamInitColor;
    streamInitColor.create( devAnyDevice, SENSOR_COLOR );

	streamInitDepth.setVideoMode( mModeDepth );
	streamInitColor.setVideoMode( mModeColor );

	namedWindow( "Depth Image (Init)",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image (Init)",  CV_WINDOW_AUTOSIZE );
	//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );

	VideoFrameRef  frameDepthInit;
    VideoFrameRef  frameColorInit;

	streamInitDepth.start();
	streamInitColor.start();
	cv::Mat BackgroundFrame;

	int avgDist = 0;
	int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
	
	OutX.clear();
	OutY.clear();

	vector<int> OldOutX, OldOutY;
	OldOutX.clear();
	OldOutY.clear();
	//------------------------------------------------------------
	//--------------------[Initiation Process]--------------------
	while( true )
	{
		streamInitDepth.readFrame( &frameDepthInit );
		streamInitColor.readFrame( &frameColorInit );

		const cv::Mat mImageDepth( frameDepthInit.getHeight(), frameDepthInit.getWidth(), CV_16UC1, (void*)frameDepthInit.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepthInit );

        cv::imshow( "Depth Image (Init)", mScaledDepth );

        const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(), CV_8UC3, (void*)frameColorInit.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );

		//--------------------[Get Average Distance]---------------------
		int depthVal = 0;
		int frameHeight = frameDepthInit.getHeight();
		int frameWidth = frameDepthInit.getWidth();
		//------------
		//backgroundDepth.resize(frameHeight * frameWidth);
		//---------------------------------------------------------------
		
		int initCount = 0;
		for(int i = 0; i < frameHeight; i++)
		{
			for(int j = 0; j < frameWidth; j++)
			{
				depthVal = mImageDepth.at<unsigned short>(i, j) + depthVal;
				initCount++;
			}
		}
		avgDist = depthVal / ((frameHeight) * (frameWidth));

		cout << "Average Distance: " << avgDist << endl;
		cv::imshow( "Color Image (Init)", cImageBGR );

		if( cv::waitKey(1) == 'q')
		{
			mImageDepth.copyTo(BackgroundFrame);
            break;
		}
	}

	streamInitDepth.destroy();
	streamInitColor.destroy();

	destroyWindow( "Depth Image (Init)" );
	destroyWindow( "Color Image (Init)" );

	VideoStream streamDepth;
    streamDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamColor;
    streamColor.create( devAnyDevice, SENSOR_COLOR );

	streamDepth.setVideoMode( mModeDepth );
	streamColor.setVideoMode( mModeColor );

	streamDepth.start();
    streamColor.start();

	namedWindow( "Depth Image",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image",  CV_WINDOW_AUTOSIZE );
	namedWindow( "Thresholded Image", CV_WINDOW_AUTOSIZE );

	int iMaxDepth = streamDepth.getMaxPixelValue();

    VideoFrameRef  frameColor;
	VideoFrameRef  frameDepth;

	OutX.clear();
	OutY.clear();
	//------------------------------------------------------------

	//------------------------------------------------------------
	//-----------------------[Main Process]-----------------------
	while( true ) 
    {
        streamDepth.readFrame( &frameDepth );
        streamColor.readFrame( &frameColor );

        const cv::Mat mImageDepth( frameDepth.getHeight(), frameDepth.getWidth(), CV_16UC1, (void*)frameDepth.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

		////////////////////////////////////////////////////////////////////////////////////////////
		//---------------------[Downsampling]-------------------------------------------------------
		double min;
		double max;
		cv::minMaxIdx(mImageDepth, &min, &max);
		cv::Mat adjMap;
		// expand your range to 0..255. Similar to histEq();
		float scale = 255 / (max-min);
		mImageDepth.convertTo(adjMap,CV_8UC1, scale, -min*scale); 

		// this is great. It converts your grayscale image into a tone-mapped one, 
		// much more pleasing for the eye
		// function is found in contrib module, so include contrib.hpp 
		// and link accordingly
		cv::Mat falseColorsMap;
		applyColorMap(adjMap, falseColorsMap, cv::COLORMAP_AUTUMN);

		cv::imshow("Out", falseColorsMap);
		//------------------------------------------------------------------------------------------
		////////////////////////////////////////////////////////////////////////////////////////////

        cv::imshow( "Depth Image", mScaledDepth );
		cv::imshow( "Depth Image2", adjMap );

        const cv::Mat mImageRGB(frameColor.getHeight(), frameColor.getWidth(), CV_8UC3, (void*)frameColor.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );
		
		//-------------[Threshold]-----------------
		cv::Mat mImageThres( frameDepth.getHeight(), frameDepth.getWidth(), CV_8UC1 );

		int backgroundPxlCount = 0;
		for(int i = 0; i < 480; i++)
		{
			for(int j = 0; j < 640; j++)
			{
				int depthVal = mImageDepth.at<unsigned short>(i, j);

				avgDist = BackgroundFrame.at<unsigned short>(i, j)-2;

				if((depthVal > (avgDist-14)) && (depthVal <= (avgDist-7)))
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 255;
					mImageThres.at<uchar>(i, j) = 255;
				}
				else
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 0;
					mImageThres.at<uchar>(i, j) = 0;
				}

				backgroundPxlCount++;
			}
		}
		GaussianBlur( mImageThres, mImageThres, Size(3,3), 0, 0 );
		
		fingerDetection( mImageThres, cImageBGR, OldOutX, OldOutY);

		cv::imshow("Thresholded Image", mImageThres);
		//----------------------------------------
        if( cv::waitKey(1) == 'q')
		{
            break;
		}
		//------------------------------------------------
		cv::imshow( "Color Image", cImageBGR );
		//----------------------------------
		OldOutX.clear();
		OldOutY.clear();
		OldOutX = OutX;
		OldOutY = OutY;
		OutX.clear();
		OutY.clear();
    }

	return 0;
}
Example #24
		void ONIKinectDevice::onNewColorFrame(VideoFrameRef frame)
		{
			//printf("[%08llu] Color Frame\n", (long long)frame.getTimestamp());
			RGBDFramePtr rgbdFrame;
			//Make sure frame is in right format
			if(frame.getVideoMode().getPixelFormat() == PIXEL_FORMAT_RGB888)
			{
				int width = frame.getVideoMode().getResolutionX();
				int height = frame.getVideoMode().getResolutionY();

				//Initialize frame if not initialized
				if(rgbdFrame == NULL)
				{
					rgbdFrame = mFrameFactory.getRGBDFrame(width,height);
				}

				if(width == rgbdFrame->getXRes()  && height == rgbdFrame->getYRes())
				{
					//Data array valid. Fill it
					//TODO: Use a more efficient method of transferring memory (memcpy, or plain linear indexing?)
					ColorPixelArray data = rgbdFrame->getColorArray();
					//TODO: Enable cropping
					//printf("Size of ColorPixel: %d Size of RGB88Pixel: %d\n", sizeof(ColorPixel), sizeof(RGB888Pixel));
					const openni::RGB888Pixel* pImage = (const openni::RGB888Pixel*)frame.getData();
					memcpy(data.get(), pImage, sizeof(RGB888Pixel)*width*height);
					/*for(int y = 0; y < height; y++)
					{
					for(int x = 0; x < width; x++)
					{
					int ind = rgbdFrame->getLinearIndex(x,y);

					data[ind] = ((ColorPixel*)pImage)[ind];
					}
					}*/
					rgbdFrame->setColorTimestamp(frame.getTimestamp());
					rgbdFrame->setHasColor(true);
					//Check if send
					if(!mSyncDepthAndColor)
					{
						//Send it
						onNewRGBDFrame(rgbdFrame);
						rgbdFrame = NULL;
					}else{
						//Sync it
						frameGuard.lock();
						if(mRGBDFrameSynced == NULL)
						{
							//FIRST POST!!!
							mRGBDFrameSynced = rgbdFrame;
						}else{
							//SECOND!!
							//Send it
							mRGBDFrameSynced->setColorArray(rgbdFrame->getColorArray());
							mRGBDFrameSynced->setHasColor(true);
							mRGBDFrameSynced->setColorTimestamp(frame.getTimestamp());
							onNewRGBDFrame(mRGBDFrameSynced);
							mRGBDFrameSynced = NULL;
						}
						//Unlock scoped
						frameGuard.unlock();
					}

				}else{
					//Size error
					onMessage("Error: depth and color frames don't match in size\n");	
				}

			}else{
				//Format error
				onMessage("Error: depth format incorrect\n");	
			}
		}