void texture::submit( int format, bool build_mip_maps )
{
    std::vector<unsigned char> pixels = ( format == GL_RGBA ? rgba_data() : rgb_data() );

    // Set unpack alignment to one byte
    GLint UnpackAlignment;
    glGetIntegerv( GL_UNPACK_ALIGNMENT, &UnpackAlignment );
    glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );

    // Submit decoded data to texture
    GLint ret;
    if( build_mip_maps )
        ret = gluBuild2DMipmaps( GL_TEXTURE_2D, format, w, h, format, GL_UNSIGNED_BYTE, pixels.data() );
    else
        glTexImage2D( GL_TEXTURE_2D, 0 /*LOD*/, format, w, h, 0 /*border*/, format, GL_UNSIGNED_BYTE, pixels.data() );

    // Restore old unpack alignment
    glPixelStorei( GL_UNPACK_ALIGNMENT, UnpackAlignment );
}
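// --- Usage sketch (not part of the original source) ---
// A minimal example of how submit() might be called, assuming 'tex' is a
// texture instance holding decoded pixel data and that a valid OpenGL
// context (and GLU, for gluBuild2DMipmaps) is current. The texture object
// creation and filter settings below are illustrative assumptions.
GLuint tex_id = 0;
glGenTextures( 1, &tex_id );
glBindTexture( GL_TEXTURE_2D, tex_id );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
tex.submit( GL_RGBA, true /*build_mip_maps*/ );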
void KinectGrabber::extractFace() {
    clock_t begin_time = clock();

    std::unique_lock<std::mutex> lock(latestFaceMutex);
    if (frameNr == latestFrameNr) {
        //std::cout << "No new data" << std::endl;
        std::this_thread::yield();
        return;
    }

    /*
     * Copy the shared pointers so we can work on a consistent snapshot
     * without holding the lock.
     */
    boost::shared_ptr<openni_wrapper::Image> lastImage(this->lastImage);
    boost::shared_ptr<openni_wrapper::DepthImage> lastDepthImage(
            this->lastDepthImage);
    lock.unlock();

    if (lastImage->getEncoding() != openni_wrapper::Image::RGB) {
        // Convert the raw (non-RGB) image into an RGB buffer.
        boost::shared_array<unsigned char> rgb_data(
                new unsigned char[lastImage->getWidth() * lastImage->getHeight() * 3]);
        lastImage->fillRGB(lastImage->getWidth(), lastImage->getHeight(),
                rgb_data.get());

        cv::Mat matimg(lastImage->getHeight(), lastImage->getWidth(), CV_8UC3,
                rgb_data.get(), 0);
        cv::cvtColor(matimg, matimg, CV_BGR2RGB);

        // Detect faces and keep the last one found.
        std::vector<cv::Rect> faces;
        facedetector.detectMultiScale(matimg, faces, 2, 6);
        cv::Rect selectedFace;
        for (cv::Rect f : faces) {
            cv::rectangle(matimg, f, cv::Scalar(255, 0, 0));
            selectedFace = f;
        }
        cv::imshow("CVFace1", matimg);
        cv::waitKey(10);

        if (selectedFace.width == 0) {
            //std::cerr << "No face found!";
        } else if (lastImage->getWidth() != lastDepthImage->getWidth()
                || lastImage->getHeight() != lastDepthImage->getHeight()) {
            std::cerr << "Kinect depth image has a different size than the RGB image;"
                    " skipping is not implemented yet";
        } else {
            // Build a point cloud covering the detected face region.
            boost::shared_ptr<pcl::PointCloud<pcl::PointXYZRGB> > cloud(
                    new pcl::PointCloud<pcl::PointXYZRGB>);
            cloud->header.frame_id = "ExportedKinect";
            cloud->height = selectedFace.height - 1;
            cloud->width = selectedFace.width - 1;
            cloud->is_dense = false;
            cloud->points.resize(cloud->height * cloud->width);

            // Specific to the Kinect, but device_ in the grabber is private.
            // Important for scaling (back-projection constants).
            float constant_x = 0.0017f;
            float constant_y = 0.0017f;
            float centerX = ((float) cloud->width - 10.f) / 2.f;
            float centerY = ((float) cloud->height - 10.f) / 2.f;

            int pointnr = 0;
            pcl::RGBValue color;
            color.Alpha = 0;
            const XnDepthPixel* depth_map =
                    lastDepthImage->getDepthMetaData().Data();

            // The extra pointnr++ in the outer loop advances one cloud point per
            // row, so each image row maps to one cloud row of width - 1 points
            // while only width - 2 pixels are filled.
            for (int y = selectedFace.y + 1;
                    y < selectedFace.y + selectedFace.height - 1; ++y, pointnr++) {
                for (int x = selectedFace.x + 1;
                        x < selectedFace.x + selectedFace.width - 1; ++x, pointnr++) {
                    pcl::PointXYZRGB& pt = cloud->points[pointnr];
                    int arraypos = x + y * lastDepthImage->getWidth();

                    // Back-project the depth sample to camera coordinates.
                    pt.z = depth_map[arraypos] * 0.01f;
                    pt.x = (static_cast<float>(x) - centerX) * pt.z * constant_x;
                    pt.y = (static_cast<float>(y) - centerY) * pt.z * constant_y;

                    // Copy the colour channels (note the reversed byte order in rgb_data).
                    color.Red = rgb_data[arraypos * 3 + 2];
                    color.Green = rgb_data[arraypos * 3 + 1];
                    color.Blue = rgb_data[arraypos * 3];
                    pt.rgba = color.long_value;
                }
            }

            // filterOutliers is not shown in this excerpt; a sketch follows below.
            filterOutliers<pcl::PointXYZRGB>(cloud);

            lock.lock();
            latestFace = cloud;
            faceNr++;
            lock.unlock();
        }
    }
}
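// --- Sketch of filterOutliers (not part of the original source) ---
// filterOutliers<PointT>() is called above but not defined in this excerpt.
// A plausible implementation, assuming it wraps PCL's StatisticalOutlierRemoval;
// the parameter values (50 neighbours, 1.0 sigma) are illustrative guesses.
#include <pcl/filters/statistical_outlier_removal.h>

template <typename PointT>
void filterOutliers( typename pcl::PointCloud<PointT>::Ptr &cloud )
{
    pcl::StatisticalOutlierRemoval<PointT> sor;
    sor.setInputCloud( cloud );
    sor.setMeanK( 50 );              // neighbours examined per point
    sor.setStddevMulThresh( 1.0 );   // drop points beyond 1 sigma of the mean neighbour distance

    pcl::PointCloud<PointT> filtered;
    sor.filter( filtered );          // run the filter
    *cloud = filtered;               // replace the cloud contents
}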