void GridSegmenter::segmentGrid(dgc_perception_map_cells_t* obstacles,
                                vector<boost::shared_ptr<Obstacle> >* regions,
                                vector<boost::shared_ptr<TrackedObstacle> > tracks,
                                double timestamp) {
  // Cache the output container, the current timestamp, and the active tracks,
  // then cluster the obstacle cells into connected regions.
  regions_ = regions;
  timestamp_ = timestamp;
  tracks_ = tracks;

  findRegions(obstacles);
}
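A minimal, hypothetical call site for this method might look like the sketch below. The wrapper function and the local names (obstacle_cells, now, etc.) are illustrative assumptions, not part of the original source; only segmentGrid's signature is taken from the example above.

#include <vector>
#include <boost/shared_ptr.hpp>

// Hypothetical usage sketch -- all local names here are illustrative.
void runSegmentationStep(GridSegmenter& segmenter,
                         dgc_perception_map_cells_t* obstacle_cells,
                         const std::vector<boost::shared_ptr<TrackedObstacle> >& tracks,
                         double now) {
  // Output container; segmentGrid() stores a pointer to it and fills it
  // via findRegions().
  std::vector<boost::shared_ptr<Obstacle> > regions;

  segmenter.segmentGrid(obstacle_cells, &regions, tracks, now);
}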
Example #2
	void Detection::doFacialComponentsExtraction()
	{
		/*
		REMARK: since the original paper says little about how the specific components
		are found, we implement this step following Akimoto: the inner regions (the
		black holes in the white face mask) are classified according to two very
		simple rules:
		-> size
		-> position (relative to the face's center of gravity)
		*/
		
		for (size_t i = 0; i < m_FaceExtracted.size(); ++i)
		{
			// find the contours (bounded binary regions)
			std::vector<std::vector<cv::Point> > contours;
			std::vector<cv::Vec4i> hierarchy;

			// work on a copy, since findContours modifies its input image
			cv::Mat tmp; 
			m_FaceExtracted[i].copyTo(tmp);

			cv::findContours(tmp, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
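			// cv::RETR_TREE retrieves every contour together with the full nesting
			// hierarchy, which findRegions() below presumably uses to separate the
			// inner holes (candidate components) from the outer (skin) contour.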

			// indices of the potential face components
			std::vector<size_t> potentialComponentIndices = findRegions(contours, hierarchy, RegionTypeInside); // indices of facial components
			std::vector<size_t> potentialSkinIndices = findRegions(contours, hierarchy, RegionTypeOutside); // indices of the face (skin) region

			// extract information for those regions
			std::vector<ContourInfo> potentialComponentContourInfo = extractContourInfo(contours, potentialComponentIndices); // facial components
			std::vector<ContourInfo> potentialSkinContourInfo = extractContourInfo(contours, potentialSkinIndices); // face (skin) region			

			if (i == frontImgNr)
			{
				doFacialComponentsExtractionFront(m_FaceGeometry, potentialComponentContourInfo, potentialSkinContourInfo);
			}
			else if (i == sideImgNr)
			{
				doFacialComponentsExtractionSide(m_FaceGeometry, potentialComponentContourInfo, potentialSkinContourInfo);
			}						
		}

		// draw the resulting 2D centroids
		// 1. front
		cv::Mat tmp1 = getCopyOfOriginal(frontImgNr);
		cv::circle(tmp1, m_FaceGeometry.getDetectedPointInt(FaceGeometry::FrontLeftEye), 10, cv::Scalar(255, 0, 0), 2);
		cv::circle(tmp1, m_FaceGeometry.getDetectedPointInt(FaceGeometry::FrontRightEye), 10, cv::Scalar(0, 255, 0), 2);
		cv::circle(tmp1, m_FaceGeometry.getDetectedPointInt(FaceGeometry::FrontMouth), 10, cv::Scalar(0, 0, 255), 2);
		cv::circle(tmp1, m_FaceGeometry.getDetectedPointInt(FaceGeometry::FrontLeftCheek), 10, cv::Scalar(255, 255, 255), 2);
		cv::circle(tmp1, m_FaceGeometry.getDetectedPointInt(FaceGeometry::FrontRightCheek), 10, cv::Scalar(255, 255, 255), 2);
		dbgShow(tmp1, "doFacialComponentsExtraction", 0);

		// 2. side
		cv::Mat tmp2 = getCopyOfOriginal(sideImgNr);
		cv::circle(tmp2, m_FaceGeometry.getDetectedPointInt(FaceGeometry::SideEye), 10, cv::Scalar(255, 0, 0), 2);
		cv::circle(tmp2, m_FaceGeometry.getDetectedPointInt(FaceGeometry::SideNoseTip), 10, cv::Scalar(255, 0, 255), 2);
		cv::circle(tmp2, m_FaceGeometry.getDetectedPointInt(FaceGeometry::SideChin), 10, cv::Scalar(255, 255, 0), 2);
		cv::circle(tmp2, m_FaceGeometry.getDetectedPointInt(FaceGeometry::SideBack), 10, cv::Scalar(255, 255, 255), 2);
		dbgShow(tmp2, "doFacialComponentsExtraction", 1);

		// combine both annotated views into a single result image for the GUI
		m_FacialPointsGUI = combineVertically(tmp1, tmp2);
	}
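The REMARK at the top of this example only names the two classification criteria (size and position relative to the face's center of gravity). As a rough sketch of what such rules could look like, and assuming a hypothetical layout for the per-contour data (the real ContourInfo fields are not shown here), the decision logic might be:

#include <opencv2/core.hpp>

// Hypothetical sketch of Akimoto-style component classification; the
// function name, the label enum, and the area/centroid parameters are
// assumptions for illustration, not taken from the original source.
enum ComponentLabel { LabelLeftEye, LabelRightEye, LabelMouth, LabelUnknown };

ComponentLabel classifyComponent(double area, const cv::Point2d& centroid,
                                 const cv::Point2d& faceCenter, double minArea) {
  if (area < minArea)
    return LabelUnknown;                // rule 1: size -- discard tiny blobs
  if (centroid.y < faceCenter.y)        // rule 2: position -- image y grows
    return centroid.x < faceCenter.x    // downward, so smaller y is above the
               ? LabelLeftEye           // center of gravity (eye region)
               : LabelRightEye;
  return LabelMouth;                    // below the center of gravity
}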