Example #1
0
// Find and crop face by dlib.
// Find and crop face by dlib.
//
// Detects the largest face in `img`, reports its bounding box through
// `rect`, and returns the aligned crop produced by align().  When no face
// is found, `rect` is reset to an empty cv::Rect and an empty cv::Mat is
// returned.  `H` / `inv_H` receive the alignment transform and its inverse
// (filled in by align()); `imgDim`, `landmarkIndices` and `scale_factor`
// are forwarded to align() unchanged.
cv::Mat FaceAlign::detectAlignCrop(const cv::Mat &img,
                                   cv::Rect & rect,
                                   cv::Mat & H,
                                   cv::Mat & inv_H,
                                   const int imgDim,
                                   const int landmarkIndices[],
                                   const float scale_factor)
{
    // Detection
    /* Dlib detects a face larger than 80x80 pixels.
     * We can use pyramid_up to double the size of image,
     * then dlib can find faces in size of 40x40 pixels in the original image.*/
    // dlib::pyramid_up(img);
    dlib::cv_image<dlib::bgr_pixel> cimg(img);
    // Use the largest detected face only.  The original code pushed the
    // result into a vector and then tested `0 == dets.size()` -- that
    // branch was dead (the vector always held exactly one element); the
    // emptiness of the rectangle itself is the real "no face" signal.
    dlib::rectangle det = getLargestFaceBoundingBox(cimg);
    if (det.is_empty())
    {
        rect = cv::Rect();
        return cv::Mat();
    }
    rect = dlibRectangleToOpenCV(det);

    // --Alignment
    return align(cimg, H, inv_H, det, imgDim, landmarkIndices, scale_factor);
}
Example #2
0
// Detect every face in `rgbImg` and return their bounding boxes as
// cv::Rect values; empty vector when no face is found.
// NOTE(review): the dlib view is created as bgr_pixel although the
// parameter is named rgbImg -- confirm the expected channel order.
std::vector<cv::Rect> FaceAlign::getAllFaceBoundingBoxes(cv::Mat & rgbImg) {
  dlib::cv_image<dlib::bgr_pixel> cimg(rgbImg);
  std::vector<dlib::rectangle> bb = getAllFaceBoundingBoxes(cimg);
  std::vector<cv::Rect> res;
  res.reserve(bb.size());                      // single allocation up front
  for (std::size_t i = 0; i < bb.size(); i++)  // size_t: no signed/unsigned mix
    res.push_back(dlibRectangleToOpenCV(bb[i]));
  return res;
}
Example #3
0
// Detect every face in `img` and store their bounding boxes in `rects`
// (previous contents of `rects` are discarded).
void FaceAlign::detectFace(const cv::Mat & img, std::vector<cv::Rect> & rects)
{
    dlib::cv_image<dlib::bgr_pixel> cimg(img);
    std::vector<dlib::rectangle> dets = getAllFaceBoundingBoxes(cimg);
    rects.clear();
    rects.reserve(dets.size());                    // avoid repeated reallocation
    for (std::size_t i = 0; i < dets.size(); i++)  // size_t: no signed/unsigned mix
        rects.push_back(dlibRectangleToOpenCV(dets[i]));
}
	// Load an RGB image from `filename` via CImg.
	//
	// @return owning pointer to the decoded image
	// @throws std::runtime_error if the file has fewer than 3 channels
	std::unique_ptr<ImageRGB> loadImg(std::string filename)
	{
		CImg<unsigned char> cimg(filename.c_str());

		if (cimg.spectrum() < 3) {
			// Throw by value: the original `throw new std::runtime_error(...)`
			// threw a raw pointer, which leaks and is NOT caught by the
			// conventional `catch (const std::exception&)`.
			throw std::runtime_error("Image is not rgb");
		}

		return std::make_unique<ImageRGB>(cimg.width(), cimg.height(), cimg.begin());
	}
Example #5
0
// Convenience overload: align `rect` inside `rgbImg`, discarding the
// forward/inverse transforms computed by the full align() overload.
cv::Mat  FaceAlign::align(cv::Mat & rgbImg,
                          cv::Rect rect,
                          const int imgDim,
                          const int landmarkIndices[],
                          const float scale_factor)
{
    cv::Mat fwd;
    cv::Mat inv;
    dlib::cv_image<dlib::bgr_pixel> view(rgbImg);
    dlib::rectangle box = openCVRectToDlib(rect);
    return align(view, fwd, inv, box, imgDim, landmarkIndices, scale_factor);
}
Example #6
0
// Return the 2-D landmark points of the largest face in `rgbImg`,
// or an empty vector when no face/landmarks are found.
std::vector<cv::Point2f> FaceAlign::getLargestFaceLandmarks(cv::Mat & rgbImg) {
  dlib::cv_image<dlib::bgr_pixel> cimg(rgbImg);
  dlib::full_object_detection landmarks = getLargestFaceLandmarks(cimg);
  std::vector<cv::Point2f> res;
  if (landmarks.num_parts() <= 0)
    return res;
  res.reserve(landmarks.num_parts());
  // Iterate over the detection's actual part count rather than a
  // hard-coded 68: a shape predictor trained with a different point count
  // would otherwise trigger an out-of-range part(i) access.
  for (unsigned long i = 0; i < landmarks.num_parts(); i++) {
    cv::Point2f p(landmarks.part(i).x(), landmarks.part(i).y());
    res.push_back(p);
  }
  return res;
}
Example #7
0
// Return one landmark-point vector per face detected in `rgbImg`.
std::vector<std::vector<cv::Point2f> > FaceAlign::getAllFaceLandmarks(cv::Mat & rgbImg){
  dlib::cv_image<dlib::bgr_pixel> cimg(rgbImg);
  std::vector<std::vector<cv::Point2f> > res;
  // std::vector, not cv::vector: the cv:: alias was removed from OpenCV
  // and was never the right container for dlib detections anyway.
  std::vector<dlib::full_object_detection> dets = getAllFaceLandmarks(cimg);
  res.reserve(dets.size());
  for (std::size_t d = 0; d < dets.size(); d++) {
    std::vector<cv::Point2f> lm;
    lm.reserve(dets[d].num_parts());
    // Use the detection's own part count rather than assuming 68 points.
    for (unsigned long i = 0; i < dets[d].num_parts(); i++) {
      cv::Point2f p(dets[d].part(i).x(), dets[d].part(i).y());
      lm.push_back(p);
    }
    res.push_back(lm);
  }
  return res;
}
// Deep-copy an OpenCV matrix into a dlib image of type ImgType.
//
// NOTE(review): ImgType is presumably a template parameter declared on a
// line above this function that is not visible here -- confirm.  Likewise
// `cv_image` is used unqualified, presumably via a `using namespace dlib`
// elsewhere in this file.
//
// @param input               source cv::Mat whose pixel data is copied
// @param release_after_copy  when true (default), the source Mat is
//                            released once the copy has been made
// @return                    a dlib image owning its own pixel storage
ImgType copy_cvmat_to_dlib_mat(cv::Mat &input, bool release_after_copy = true)
{
    static_assert(std::is_class<ImgType>::value, "ImgType should be a class");

    // The pixel type must match what ImgType stores.
    using pixel_type = typename dlib::image_traits<ImgType>::pixel_type;
    // cv_image is only a non-owning view over `input`...
    cv_image<pixel_type> cimg(input);
    ImgType output;
    // ...assign_image performs the actual deep copy into `output`.
    dlib::assign_image(output, cimg);
    if(release_after_copy){
        input.release();
    }

    return output;
}
// Decompress this encoded image block into `dec` (raw samples).
// Supported wire formats: JPEG, wavelet, and T.4.
void MSG_data_image_encoded::decode( MSG_data_image *dec )
{
  long long dlen = len * 8;   // payload length in bits

  // Working copy of the compressed payload.
  // NOTE(review): ibuf is never deleted in this function -- presumably
  // CDataFieldCompressedImage takes ownership of the buffer; confirm,
  // otherwise this leaks `len` bytes per call.
  uint_1 *ibuf = new uint_1[len];
  memcpy(ibuf, data, len);

  Util::CDataFieldCompressedImage cdata =
                    Util::CDataFieldCompressedImage(ibuf, dlen, bpp, nx, ny);

  Util::CDataFieldUncompressedImage udata;
  std::vector <short> QualityInfo;

  // Dispatch on the compression format recorded in the header.
  switch (format)
  {
    case MSG_JPEG_FORMAT:
      COMP::DecompressJPEG(cdata, bpp, udata, QualityInfo);
      break;
    case MSG_WAVELET_FORMAT:
      COMP::DecompressWT(cdata, bpp, udata, QualityInfo);
      break;
    case MSG_T4_FORMAT:
      COMP::DecompressT4(cdata, udata, QualityInfo);
      break;
    default:
      std::cerr << "Unknown compression used." << std::endl;
      // NOTE(review): a bare `throw;` with no exception in flight calls
      // std::terminate -- consider throwing a real exception object here.
      throw;
  }

  // Wrap the uncompressed field and copy the decoded samples out.
  COMP::CImage cimg(udata);

  int decnum = nx * ny;                 // total sample count
  dec->data = new MSG_SAMPLE[decnum];   // caller owns dec->data
  memcpy(dec->data, cimg.Get( ), decnum*sizeof(MSG_SAMPLE));
  dec->len  = decnum;


  return;
}
Example #10
0
// Stream color and depth frames from the first available Kinect sensor
// and display them in OpenCV windows until 'q' is pressed or a frame
// fetch fails.
void KinectDataMan::ShowColorDepth()
{
	// init kinect and connect
	if( m_cvhelper.IsInitialized() )
		return;

	m_cvhelper.SetColorFrameResolution(color_reso);
	m_cvhelper.SetDepthFrameResolution(depth_reso);

	HRESULT hr;
	// Get number of Kinect sensors
	int sensorCount = 0;
	hr = NuiGetSensorCount(&sensorCount);
	if (FAILED(hr)) 
	{
		return;
	}

	// If no sensors, report failure and bail out.  (The original fell
	// through here and later used an uninitialized helper.)
	if (sensorCount == 0)
	{
		cerr<<"No kinect"<<endl;
		return;
	}

	// Iterate through Kinect sensors until one is successfully initialized
	for (int i = 0; i < sensorCount; ++i) 
	{
		INuiSensor* sensor = NULL;
		hr = NuiCreateSensorByIndex(i, &sensor);
		if (SUCCEEDED(hr))
		{
			hr = m_cvhelper.Initialize(sensor);
			if (SUCCEEDED(hr)) 
			{
				// Report success
				cerr<<"Kinect initialized"<<endl;
				break;
			}
			else
			{
				// Uninitialize KinectHelper to show that Kinect is not ready
				m_cvhelper.UnInitialize();
				return;
			}
		}
	}

	// Guard: every NuiCreateSensorByIndex call may have failed, leaving
	// the helper uninitialized; streaming from it would be undefined.
	if( !m_cvhelper.IsInitialized() )
		return;

	// Allocate frame buffers sized to the helper's configured resolutions.
	DWORD width, height;
	m_cvhelper.GetColorFrameSize(&width, &height);
	Size colorsz(width, height);
	Mat cimg(colorsz, m_cvhelper.COLOR_TYPE);
	m_cvhelper.GetDepthFrameSize(&width, &height);
	Size depthsz(width, height);
	Mat dimg(depthsz, m_cvhelper.DEPTH_RGB_TYPE);

	// start processing: poll both streams, show each frame, quit on 'q'
	while(true)
	{
		// get color frame
		if( SUCCEEDED(m_cvhelper.UpdateColorFrame()) )
		{
			HRESULT hr = m_cvhelper.GetColorImage(&cimg);
			if(FAILED(hr))
				break;

			imshow("color", cimg);
			if( waitKey(10) == 'q' )
				break;
		}

		if( SUCCEEDED(m_cvhelper.UpdateDepthFrame()) )
		{
			HRESULT hr = m_cvhelper.GetDepthImageAsArgb(&dimg);
			if(FAILED(hr))
				break;

			imshow("depth", dimg);
			if( waitKey(10) == 'q' )
				break;
		}
	}

	destroyAllWindows();

}
Example #11
0
// Live demo: grab color+depth frames from the Kinect, run the object
// proposer, and render the top proposal crops side-by-side with the
// annotated input stream until 'q' is pressed.
void ObjectTester::RunVideoDemo()
{
	GenericObjectDetector detector;

	KinectDataMan kinectDM;

	if( !kinectDM.InitKinect() )
		return;

	bool doRank = true;
	// start fetching stream data
	while(1)
	{
		Mat cimg, dmap;
		kinectDM.GetColorDepth(cimg, dmap);

		// resize image so its larger side is bounded (target 300)
		Size newsz;
		ToolFactory::compute_downsample_ratio(Size(cimg.cols, cimg.rows), 300, newsz);
		resize(cimg, cimg, newsz);
		//resize(dmap, dmap, newsz);
		//normalize(dmap, dmap, 0, 255, NORM_MINMAX);

		// get objects
		vector<ImgWin> objwins, salwins;
		if( !detector.ProposeObjects(cimg, dmap, objwins, salwins, doRank) )
			continue;

		//////////////////////////////////////////////////////////////////////////
		// draw best k windows
		// NOTE(review): if ProposeObjects can succeed with empty objwins,
		// topK is 0 and the division below divides by zero -- confirm the
		// detector guarantees at least one window on success.
		int topK = MIN(6, objwins.size());
		int objimgsz = newsz.height / topK;
		int canvas_h = newsz.height;
		// canvas layout: input image | 10px gutter | two columns of crops
		int canvas_w = newsz.width + 10 + objimgsz*2;
		Mat canvas(canvas_h, canvas_w, CV_8UC3);
		canvas.setTo(Vec3b(0,0,0));
		// get top windows: square thumbnails of each proposal crop
		vector<Mat> detimgs(topK);
		vector<Mat> salimgs(topK);
		for (int i=0; i<topK; i++)
		{
			cimg(objwins[i]).copyTo(detimgs[i]);
			resize(detimgs[i], detimgs[i], Size(objimgsz, objimgsz));
			cimg(salwins[i]).copyTo(salimgs[i]);
			resize(salimgs[i], salimgs[i], Size(objimgsz, objimgsz));
		}

		// draw boxes on input: green = object windows, red = saliency windows
		Scalar objcolor(0, 255, 0);
		Scalar salcolor(0, 0, 255);
		for(int i=0; i<topK; i++)
		{
			rectangle(cimg, objwins[i], objcolor);
			rectangle(cimg, salwins[i], salcolor);
		}
		// mark the image center
		circle(cimg, Point(cimg.cols/2, cimg.rows/2), 2, CV_RGB(255,0,0));
		// copy input image
		cimg.copyTo(canvas(Rect(0, 0, cimg.cols, cimg.rows)));

		// copy subimg: object crops in the first column, saliency in the second
		for (int i=0; i<detimgs.size(); i++)
		{
			Rect objbox(cimg.cols+10, i*objimgsz, objimgsz, objimgsz);
			detimgs[i].copyTo(canvas(objbox));
			Rect salbox(cimg.cols+10+objimgsz, i*objimgsz, objimgsz, objimgsz);
			salimgs[i].copyTo(canvas(salbox));
		}

		// upscale 2x for display
		resize(canvas, canvas, Size(canvas.cols*2, canvas.rows*2));
		//if(doRank)
			//putText(canvas, "Use Ranking", Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, CV_RGB(255, 0, 0));
		imshow("object proposals", canvas);
		if( waitKey(10) == 'q' )
			break;
	}
}
Example #12
0
	void saveImg(const ImageRGB & img, const std::string filename)
	{
		CImg<unsigned char> cimg(img.data(0, 0, Channel::Red), img.width(), img.height(), 1, 3);

		cimg.save(filename.c_str());
	}
Example #13
0
	void saveImg(const ImageGray & img, const std::string filename)
	{
		CImg<unsigned char> cimg(img.data(), img.width(), img.height(), 1, 1);

		cimg.save(filename.c_str());
	}
Example #14
0
// Detect the largest face in `img` and store its bounding box in `rect`.
void FaceAlign::detectFace(const cv::Mat & img, cv::Rect & rect)
{
    dlib::cv_image<dlib::bgr_pixel> view(img);
    const dlib::rectangle largest = getLargestFaceBoundingBox(view);
    rect = dlibRectangleToOpenCV(largest);
}
Example #15
0
// Return the bounding box of the largest face in `rgbImg` as a cv::Rect.
cv::Rect FaceAlign::getLargestFaceBoundingBox(cv::Mat & rgbImg) {
  dlib::cv_image<dlib::bgr_pixel> view(rgbImg);
  const dlib::rectangle largest = getLargestFaceBoundingBox(view);
  return dlibRectangleToOpenCV(largest);
}