/*!
    Constructs a video frame with the given width, height and \a format, that
    uses an existing memory buffer, data. The width and height must be
    specified in pixels given by \a size, \a data must be 32-bit aligned, and
    each plane and scanline of data in the image must also be 32-bit aligned.

    The buffer must remain valid throughout the life of the QVideoFrame and
    all copies that have not been modified or otherwise detached from the
    original buffer. The QVideoFrame does not delete the buffer at
    destruction. The \a helper parameter provides thread safety.
*/
QVideoFrame::QVideoFrame(
        PixelFormat format,
        const QSize& size,
        uchar* data,
        QVideoFrame::BufferHelper *helper)
    : d(new QVideoFramePrivate)
{
    d->format = format;
    d->size = size;
    d->bufferHelper = helper;

    if (!size.isEmpty()) {
        // Lay the planes out back-to-back in the caller-supplied buffer.
        uchar* nextPlane = data;

        for (int i = 0; i < maxPlanesCount; ++i) {
            const int bitsPerPixel = colorDepth(format, i);
            const QSize dims = d->planeSize(i);

            // A zero color depth or empty plane size marks the end of the
            // planes used by this pixel format.
            if (bitsPerPixel == 0 || dims.isEmpty())
                break;

            // Pad each scanline so it starts on a 32-bit boundary:
            // round the row's bit count up to a multiple of 32, then
            // convert to bytes.
            const int stride = ((dims.width() * bitsPerPixel + 31) / 32) * 32 / 8;

            d->planes[i] = nextPlane;
            d->bytesPerLine[i] = stride;
            nextPlane += dims.height() * stride;
        }
    }

    // Lock the helper for the lifetime of this frame (released elsewhere,
    // e.g. at destruction/unmap) to guard the shared buffer.
    if (d->bufferHelper)
        d->bufferHelper->lock();
}
void Viewer::display( const colorspaces::Image& imageColor, const colorspaces::Image& imageDepth ) { // Draw optical center for (int i = -2; i<=2; i++) for (int j =-2; j<=2; j++) { int pixel = (imageColor.width*(imageColor.height/2+j)+(imageColor.width/2+i))*3; imageColor.data[pixel] = 255; imageColor.data[pixel+1] = 0; imageColor.data[pixel+2] = 0; } // Save depth map in map vector (just save 25 maps). Each map is saved each 2 iterations if (handlerDepth) { std::vector<colorspaces::Image>::iterator it = mDepthVector.begin(); mDepthVector.insert(it, imageDepth); if (mDepthVector.size() > MAX_MAPS) mDepthVector.erase(mDepthVector.begin() + MAX_MAPS, mDepthVector.end()); else if (mDepthVector.size() == MAX_MAPS -1) std::cout << "* Depth vector is already filled!" << std::endl; } else handlerDepth = !handlerDepth; colorspaces::ImageRGB8 img_rgb8(imageColor);//conversion will happen if needed Glib::RefPtr<Gdk::Pixbuf> imgBuffColor = Gdk::Pixbuf::create_from_data((const guint8*)img_rgb8.data, Gdk::COLORSPACE_RGB, false, 8, img_rgb8.width, img_rgb8.height, img_rgb8.step); gtkimage_color->clear(); gtkimage_color->set(imgBuffColor); gtkimage_color2->clear(); gtkimage_color2->set(imgBuffColor); if (intrinsicsEnable) saveImage(imageColor); imgOrig.create(imageColor.size(), CV_8UC3); imageColor.copyTo(imgOrig); pthread_mutex_lock(&mutex); mImageDepth = imageDepth.clone(); pthread_mutex_unlock(&mutex); // Show depth image in color // 3 RGB canals // 0: Image in gray scale // [1,2]: Real data of distance // 1: 8bit MSB // 2: 8bit LSB std::vector<cv::Mat> layers; cv::Mat colorDepth(imageDepth.size(),imageDepth.type()); cv::split(imageDepth, layers); cv::cvtColor(layers[0],colorDepth,CV_GRAY2RGB); Glib::RefPtr<Gdk::Pixbuf> imgBuffDepth = Gdk::Pixbuf::create_from_data((const guint8*) colorDepth.data, Gdk::COLORSPACE_RGB, false, 8, imageDepth.width, imageDepth.height, imageDepth.step); gtkimage_depth->clear(); gtkimage_depth->set(imgBuffDepth); if (hsvFilter != NULL) { 
createImageHSV(imageDepth); // Show HSV image Glib::RefPtr<Gdk::Pixbuf> imgBuffHSV = Gdk::Pixbuf::create_from_data((const guint8*)imgHSV.data, Gdk::COLORSPACE_RGB, false, 8, imgHSV.size().width, imgHSV.size().height, imgHSV.step); gtkimage_hsv->clear(); gtkimage_hsv->set(imgBuffHSV); // Show Blob Image Glib::RefPtr<Gdk::Pixbuf> imgBuffBLOB = Gdk::Pixbuf::create_from_data( (guint8*)mFrameBlob->imageData, Gdk::COLORSPACE_RGB, false, mFrameBlob->depth, mFrameBlob->width, mFrameBlob->height, mFrameBlob->widthStep); gtkimage_blob->clear(); gtkimage_blob->set(imgBuffBLOB); //cvReleaseImage(&mFrameBlob); } displayFrameRate(); mainwindow->resize(1,1); while (gtkmain.events_pending()) gtkmain.iteration(); }