Code Example #1
int imageProcess(cv::Mat frame)
{
    int countLabel = 0;

    // Convert the incoming BGR frame to grayscale
    Mat grayFrame;
    cvtColor(frame, grayFrame, CV_BGR2GRAY);

    // Watershed segmentation

    // Get the binary map by thresholding the grayscale frame
    // (instead of loading a pre-converted image such as "binary.bmp")
    grayFrame = grayFrame < 60;
    // cv::imshow("Binary Image", grayFrame);   // optional debug display

    // Eliminate noise and smaller objects to obtain the sure foreground
    cv::Mat fg;
    cv::erode(grayFrame, fg, cv::Mat(), cv::Point(-1, -1), 6);
    // cv::imshow("Foreground Image", fg);      // optional debug display

    // Identify image pixels without objects (sure background, labeled 128)
    cv::Mat bg;
    cv::dilate(grayFrame, bg, cv::Mat(), cv::Point(-1, -1), 6);
    cv::threshold(bg, bg, 1, 128, cv::THRESH_BINARY_INV);
    // cv::imshow("Background Image", bg);      // optional debug display

    // Combine foreground and background into the markers image
    cv::Mat markers(grayFrame.size(), CV_8U, cv::Scalar(0));
    markers = fg + bg;
    // cv::imshow("Markers", markers);          // optional debug display

    // Create the watershed segmentation object, set markers and process
    WatershedSegmenter segmenter;
    segmenter.setMarkers(markers);
    segmenter.process(frame);
//ROS_INFO("Part 7");
   cv::Mat mutableImage;
   mutableImage = segmenter.getSegmentation();
   
   string name = "simpleWall";
   
   std::stringstream sstm;
   sstm << name << hallwayCenter;
   ++countLabel;
   
   imwrite(sstm.str()+".jpg", segmenter.getSegmentation());

   cv::namedWindow("testVision");
   cv::imshow("testVision",mutableImage);
  // cv::imwrite("imgForPoster", mutableImage);

//ROS_INFO("Part 8");

    // Starting point of the row scan (row y_ex, center column x_ex)
    int y_ex = 260;
    int x_ex = 320;

    // Walk left along row y_ex: a column whose label is 128 but is no longer 128
    // ten pixels further left is taken as the left wall edge
    for (int i = x_ex; i >= 10; i--) {   // stop at 10 so that i-10 stays in bounds
        int floorVal  = mutableImage.at<uchar>(y_ex, i);
        int floorVal1 = mutableImage.at<uchar>(y_ex, i - 10);

        if (floorVal == 128 && floorVal1 != 128) {
            a1 = i;
        }
    }

//ROS_INFO("Part 10");

           for (int i=x_ex; i<590; i++) {
        	   Scalar intensity = (int)mutableImage.at<uchar>(y_ex, i);
        	   Scalar intensity2 = (int)mutableImage.at<uchar>(y_ex, i+30);
        	   int floorVal= (int)*intensity.val;
        	   int floorVal2= (int)*intensity2.val;

        	   if(floorVal==128 && floorVal2 != 128) {
        		   a2=i;
        	   }
           }

//ROS_INFO("Part 11");

           hallwayCenter=(a1+a2)/2;
           int imageCenter= 300;
//ROS_INFO("Part 12");
           int error= hallwayCenter-imageCenter;

//ROS_INFO("Part 13");

     	   double tempYawValue= error*kValue;

	   double yawArray[10000];
   
           yawArray[countLabel] = tempYawValue;
           
           


           //MAIN Y Value for ROS implemenation. Should be teling me where the center is in relation to one of the walls.
//ROS_INFO("Part 14");

    // Mark the detected hallway center on the segmentation image
    putText(mutableImage, "X", cvPoint(hallwayCenter, y_ex), 0, 2.5, cvScalar(0, 0, 0, 255), 3, CV_AA);
    cout << "It is working " << hallwayCenter << endl;

    ROS_INFO("Frame Error:  [%d]", error);

    // Publish the yaw correction on cmd_vel.
    // Note: creating the NodeHandle and Publisher on every frame is costly and the
    // first messages may be dropped before subscribers connect; ideally the
    // publisher would be created once, outside this function.
    ros::NodeHandle n;
    ros::Publisher tempYawValue_pub = n.advertise<geometry_msgs::Twist>("cmd_vel", 1000);
    geometry_msgs::Twist yaw_access;
    yaw_access.angular.z = tempYawValue;
    tempYawValue_pub.publish(yaw_access);

    // Dump the (mostly zero) yaw array for debugging
    for (int i = 0; i < 10000; i++)
    {
        ROS_INFO("y[%f] ", yawArray[i]);
    }

    return tempYawValue;   // note: truncated to int by the function's return type

}
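For context, a function like this is normally driven from a ROS image subscriber rather than called directly. Below is a minimal sketch of such a wrapper node, assuming cv_bridge is available; the node name, topic name, callback name and gain value are illustrative placeholders, not taken from the original project, and imageProcess() plus the globals it uses are assumed to be defined as in the example above.

// Hypothetical wrapper node feeding camera frames into imageProcess().
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>

// Globals referenced by imageProcess() (declared here for illustration only)
int hallwayCenter = 0, a1 = 0, a2 = 0;
double kValue = 0.005;                    // proportional gain, placeholder value

int imageProcess(cv::Mat frame);          // defined as in the example above

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    // Convert the ROS image message to an OpenCV BGR matrix and run the pipeline
    cv::Mat frame = cv_bridge::toCvCopy(msg, "bgr8")->image;
    imageProcess(frame);
    cv::waitKey(1);                       // let the HighGUI windows refresh
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "hallway_follower");
    ros::NodeHandle nh;
    ros::Subscriber sub = nh.subscribe("camera/image_raw", 1, imageCallback);
    ros::spin();
    return 0;
}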
Code Example #2
File: watershed.cpp  Project: sshhj89/InhaDriverless
int main()
{
    //ocl::setUseOpenCL(true);

    VideoCapture video("/Users/sonhojun/Downloads/data/all.mp4");

    if (!video.isOpened())
    {
        cout << "video open error" << endl;
        return -1;
    }

    Mat binary;
    Mat frame1, frame2;

    while (true)
    {
        // Read two consecutive frames; stop when the video runs out
        if (!video.read(frame1) || !video.read(frame2))
            break;
        
//        Mat cropFrame1, cropFrame2;
//        Mat oriConv = frame1(Rect(10,340,700,200));
//        
////        cropFrame1 = frame1(Rect(10,340,700,200));
////        cropFrame2 = frame2(Rect(10,340,700,200));
//        
//        Mat grayImage1,grayImage2;
//        Mat differenceImage;
//        Mat thresholdImage;
//        Vec<double,4> totalDiff = 0.0;
//        
//        cropFrame1 = frame1(Rect(10,340,700,200));
//        cvtColor(cropFrame1, grayImage1, COLOR_BGR2GRAY);
//        
//        cropFrame2 =frame2(Rect(10,340,700,200));
//        
//        cvtColor(cropFrame2, grayImage2, COLOR_BGR2GRAY);
//        absdiff(grayImage1,grayImage2,differenceImage);
//        
//        totalDiff = sum(differenceImage) / (690 * 140);
//        cout << "sum diff: " <<totalDiff<<endl;
//        
//        if(totalDiff[0] > 14.0)
//            continue;
//        
//        threshold(differenceImage, thresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
//        
//        blur(thresholdImage, thresholdImage , Size(BLUR_SIZE,BLUR_SIZE));
//        
//        threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
//        
//        
//        //if tracking enabled, search for contours in our thresholded image
//
//        //searchForMovement(thresholdImage, oriConv);
//        
//        
//        //show our captured frame
//        imshow("moving",thresholdImage);
        
      
       // imshow("ori",temp);
        Mat cropRGB = frame2(Rect(10,340,700,200));
        Mat cropG;
        cvtColor(cropRGB, cropG, CV_RGB2GRAY);
        GaussianBlur(cropG, cropG, Size(5,5), 0,0);
        threshold(cropG,binary,0,255,THRESH_OTSU|THRESH_BINARY);
      //  adaptiveThreshold(cropG, binary, 255, <#int adaptiveMethod#>, THRESH_OTSU|THRESH_BINARY, 3, 0.);
       // bitwise_xor(thresholdImage,binary,binary);
        imshow("bin",binary);
      
        // Erode to keep only sure-foreground pixels
        Mat fg;
        erode(binary, fg, Mat(), Point(-1, -1), 6);

        namedWindow("Foreground Image");
        imshow("Foreground Image", fg);

        // Dilate and invert-threshold so the sure background is labeled 128
        Mat bg;
        dilate(binary, bg, Mat(), Point(-1, -1), 6);
        threshold(bg, bg, 1, 128, THRESH_BINARY_INV);

        namedWindow("Background Image");
        imshow("Background Image",bg);

//        Mat dist;
//        distanceTransform(binary, dist, CV_DIST_L2, 5);
//        normalize(dist, dist, 0, 1., NORM_MINMAX);
//        imshow("dist",dist);
//        
//        threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
//                // Dilate a bit the dist image
//        Mat kernel1 = Mat::ones(3, 3, CV_8UC1);
//        dilate(dist, dist, kernel1);
//        imshow("Peaks", dist);
        
        // Combine foreground and background into the marker image
        Mat markers(binary.size(), CV_8U, Scalar(0));
        markers = fg + bg;

        namedWindow("Markers");
        imshow("Markers", markers);
        
        WatershedSegmenter segmenter;
        
        segmenter.setMarkers(markers);
        segmenter.process(cropRGB);

        namedWindow("Segmentation");
        imshow("Segmentation",segmenter.getSegmentation());
        
//        namedWindow("Watersheds");   // optionally display the watershed lines
//        imshow("Watersheds",segmenter.getWatersheds());
        
        
        
        
//        Mat kernel = Mat::ones(3,3,CV_8U);
//        
//        Mat morpho;
//        morphologyEx(binary,morpho,MORPH_OPEN,kernel);
//        
//        imshow("open",morpho);
//        
//        Mat sure_bg;
//        dilate(morpho,sure_bg,kernel);
//        
//        imshow("dilate",sure_bg);
//        
//        Mat dist;
//        distanceTransform(binary, dist, CV_DIST_L2, 5);
//        // Normalize the distance image for range = {0.0, 1.0}
//        // so we can visualize and threshold it
//        normalize(dist, dist, 0, 1., NORM_MINMAX);
//        imshow("Distance Transform Image", dist);
//        // Threshold to obtain the peaks
//        // This will be the markers for the foreground objects
//        threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
//        // Dilate a bit the dist image
//        Mat kernel1 = Mat::ones(3, 3, CV_8UC1);
//        dilate(dist, dist, kernel1);
//        imshow("Peaks", dist);
//        // Create the CV_8U version of the distance image
//        // It is needed for findContours()
//        Mat dist_8u;
//        dist.convertTo(dist_8u, CV_8U);
//        // Find total markers
//        vector<vector<Point> > contours;
//        findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
//        // Create the marker image for the watershed algorithm
//        Mat markers = Mat::zeros(dist.size(), CV_32SC1);
//        // Draw the foreground markers
//        for (size_t i = 0; i < contours.size(); i++)
//            drawContours(markers, contours, static_cast<int>(i), Scalar::all(static_cast<int>(i)+1), -1);
//        // Draw the background marker
//        circle(markers, Point(5,5), 3, CV_RGB(255,255,255), -1);
//        imshow("Markers", markers*10000);
//        // Perform the watershed algorithm
//        watershed(cropRGB, markers);
////        Mat mark = Mat::zeros(markers.size(), CV_8UC1);
////        markers.convertTo(mark, CV_8UC1);
////        bitwise_not(mark, mark);
////        imshow("Markers_v2", mark); // uncomment this if you want to see how the mark
//        // image looks like at that point
//        // Generate random colors
//        vector<Vec3b> colors;
//        for (size_t i = 0; i < contours.size(); i++)
//        {
//            int b = theRNG().uniform(0, 255);
//            int g = theRNG().uniform(0, 255);
//            int r = theRNG().uniform(0, 255);
//            colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
//        }
//        // Create the result image
//        Mat dst = Mat::zeros(markers.size(), CV_8UC3);
//        // Fill labeled objects with random colors
//        for (int i = 0; i < markers.rows; i++)
//        {
//            for (int j = 0; j < markers.cols; j++)
//            {
//                int index = markers.at<int>(i,j);
//                if (index > 0 && index <= static_cast<int>(contours.size()))
//                    dst.at<Vec3b>(i,j) = colors[index-1];
//                else
//                    dst.at<Vec3b>(i,j) = Vec3b(0,0,0);
//            }
//        }
//        // Visualize the final image
//        imshow("Final Result", dst);
//        
//    

        waitKey(10);
    }
    
    return 0;
}
Code Example #3
int main()
{
	// Read input image
	cv::Mat image= cv::imread("../images/group.jpg");
	if (!image.data)
		return 0;

    // Display the image
	cv::namedWindow("Original Image");
	cv::imshow("Original Image",image);

	// Get the binary map
	cv::Mat binary;
	binary= cv::imread("../images/binary.bmp",0);

    // Display the binary image
	cv::namedWindow("Binary Image");
	cv::imshow("Binary Image",binary);

	// Eliminate noise and smaller objects
	cv::Mat fg;
	cv::erode(binary,fg,cv::Mat(),cv::Point(-1,-1),6);

    // Display the foreground image
	cv::namedWindow("Foreground Image");
	cv::imshow("Foreground Image",fg);

	// Identify image pixels without objects
	cv::Mat bg;
	cv::dilate(binary,bg,cv::Mat(),cv::Point(-1,-1),6);
	cv::threshold(bg,bg,1,128,cv::THRESH_BINARY_INV);

    // Display the background image
	cv::namedWindow("Background Image");
	cv::imshow("Background Image",bg);

	// Show markers image
	cv::Mat markers(binary.size(),CV_8U,cv::Scalar(0));
	markers= fg+bg;
	cv::namedWindow("Markers");
	cv::imshow("Markers",markers);

	// Create watershed segmentation object
	WatershedSegmenter segmenter;

	// Set markers and process
	segmenter.setMarkers(markers);
	segmenter.process(image);

	// Display segmentation result
	cv::namedWindow("Segmentation");
	cv::imshow("Segmentation",segmenter.getSegmentation());

	// Display watersheds
	cv::namedWindow("Watersheds");
	cv::imshow("Watersheds",segmenter.getWatersheds());

	// Open another image
	image= cv::imread("../images/tower.jpg");
    if (!image.data)
        return 0;

	// Identify background pixels
	cv::Mat imageMask(image.size(),CV_8U,cv::Scalar(0));
	cv::rectangle(imageMask,cv::Point(5,5),cv::Point(image.cols-5,image.rows-5),cv::Scalar(255),3);
	// Identify foreground pixels (in the middle of the image)
	cv::rectangle(imageMask,cv::Point(image.cols/2-10,image.rows/2-10),
						 cv::Point(image.cols/2+10,image.rows/2+10),cv::Scalar(1),10);

	// Set markers and process
	segmenter.setMarkers(imageMask);
	segmenter.process(image);

    // Display the image with markers
	cv::rectangle(image,cv::Point(5,5),cv::Point(image.cols-5,image.rows-5),cv::Scalar(255,255,255),3);
	cv::rectangle(image,cv::Point(image.cols/2-10,image.rows/2-10),
						 cv::Point(image.cols/2+10,image.rows/2+10),cv::Scalar(1,1,1),10);
	cv::namedWindow("Image with marker");
	cv::imshow("Image with marker",image);

	// Display watersheds
	cv::namedWindow("Watersheds of foreground object");
	cv::imshow("Watersheds of foreground object",segmenter.getWatersheds());

	// Open another image
	image= cv::imread("../images/tower.jpg");

	// define bounding rectangle
	cv::Rect rectangle(50,70,image.cols-150,image.rows-180);

	cv::Mat result; // segmentation result (4 possible values)
	cv::Mat bgModel,fgModel; // the models (internally used)
	// GrabCut segmentation
	cv::grabCut(image,    // input image
		        result,   // segmentation result
				rectangle,// rectangle containing foreground
				bgModel,fgModel, // models
				1,        // number of iterations
				cv::GC_INIT_WITH_RECT); // use rectangle

	// Get the pixels marked as likely foreground
	cv::compare(result,cv::GC_PR_FGD,result,cv::CMP_EQ);
	// Generate output image
	cv::Mat foreground(image.size(),CV_8UC3,cv::Scalar(255,255,255));
	image.copyTo(foreground,result); // bg pixels not copied

	// draw rectangle on original image
	cv::rectangle(image, rectangle, cv::Scalar(255,255,255),1);
	cv::namedWindow("Image");
	cv::imshow("Image",image);

	// display result
	cv::namedWindow("Segmented Image");
	cv::imshow("Segmented Image",foreground);

	// Open another image
	image= cv::imread("../images/group.jpg");

	// define bounding rectangle
    cv::Rect rectangle2(10,100,380,180);

	cv::Mat bkgModel,fgrModel; // the models (internally used)
	// GrabCut segmentation
	cv::grabCut(image,  // input image
		        result, // segmentation result
				rectangle2,bkgModel,fgrModel,5,cv::GC_INIT_WITH_RECT);
	// Get the pixels marked as likely foreground
//	cv::compare(result,cv::GC_PR_FGD,result,cv::CMP_EQ);
	result= result&1;
	foreground.create(image.size(),CV_8UC3);
    foreground.setTo(cv::Scalar(255,255,255));
	image.copyTo(foreground,result); // bg pixels not copied

	// draw rectangle on original image
	cv::rectangle(image, rectangle2, cv::Scalar(255,255,255),1);
	cv::namedWindow("Image 2");
	cv::imshow("Image 2",image);

	// display result
	cv::namedWindow("Foreground objects");
	cv::imshow("Foreground objects",foreground);

	cv::waitKey();
	return 0;
}
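All three examples call a WatershedSegmenter helper class that is not listed in this section. It appears to be the thin wrapper around cv::watershed from Laganière's OpenCV 2 Computer Vision Application Programming Cookbook; a minimal sketch of that class, under that assumption, is shown below for reference.

// Minimal sketch of the WatershedSegmenter wrapper assumed by the examples above.
#include <opencv2/opencv.hpp>

class WatershedSegmenter {
private:
    cv::Mat markers;

public:
    void setMarkers(const cv::Mat& markerImage) {
        // cv::watershed expects a 32-bit signed marker image
        markerImage.convertTo(markers, CV_32S);
    }

    cv::Mat process(const cv::Mat& image) {
        // Flood from the markers; watershed boundaries are set to -1
        cv::watershed(image, markers);
        return markers;
    }

    // Labeled regions as an 8-bit image (boundary pixels saturate to 0)
    cv::Mat getSegmentation() {
        cv::Mat tmp;
        markers.convertTo(tmp, CV_8U);
        return tmp;
    }

    // Watershed lines as black pixels on a white background
    cv::Mat getWatersheds() {
        cv::Mat tmp;
        markers.convertTo(tmp, CV_8U, 255, 255);
        return tmp;
    }
};

With this interface, getSegmentation() returns the basin labels (128 for background seeds, 255 for foreground seeds), which is what the row scans in Code Example #1 rely on when they test for the value 128.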