// Locate the pupil's centre of mass in a binary image via blob detection.
// The `center` member is updated only when detection is unambiguous
// (exactly one keypoint found); otherwise it is left untouched.
void Pupil::blob_detect( Mat & dst){
    // Default-parameter blob detector (OpenCV 2.x stack-constructed API).
    SimpleBlobDetector blob_finder;

    // Keypoints reported by the detector — candidate centres of mass.
    std::vector<KeyPoint> found;
    blob_finder.detect( dst, found );

    // Accept the result only when a single blob was detected.
    if (found.size() == 1) {
        center = Point(cvRound(found[0].pt.x), cvRound(found[0].pt.y));
    }
}
void Image::setNumBolbs() { memset(numBlobs,0,sizeof(double)*NUM); for(int t=0;t<NUM;t++) { readInGray(t); SimpleBlobDetector detector; vector<KeyPoint> keypoints; detector.detect(*gray,keypoints); numBlobs[t] = keypoints.size(); // cout<<numBlobs[t]<<endl; delete gray; } cout<<"blob...ok"<<endl; }
ImageConverter() : it_(nh_) { image_pub_ = it_.advertise("output_image", 1); image_sub_ = it_.subscribe("input_image", 1, &ImageConverter::imageCb, this); //for blob detection params.minDistBetweenBlobs = 10.0; // minimum 10 pixels between blobs params.filterByArea = true; // filter my blobs by area of blob params.minArea = 100; // min 100 pixels squared params.blobColor = 255; // params.maxArea = 5000.0; // max 500 pixels squared // params.minThreshold = 0; // params.maxThreshold = 5; // params.thresholdStep = 5; // // params.minArea = 10; // params.minConvexity = 0.3; // params.minInertiaRatio = 0.01; // // params.maxArea = 8000; // params.maxConvexity = 10; // // params.filterByColor = false; // params.filterByCircularity = false; blobDetector = new SimpleBlobDetector( params ); blobDetector->create("SimpleBlob"); cv::namedWindow(WINDOW); }
// ROS image callback: convert the incoming message to BGR, run blob
// detection on the red plane, display the annotated frame, and republish
// the (unmodified) input image.
void imageCb(const sensor_msgs::ImageConstPtr& msg)
{
    cv_bridge::CvImagePtr frame;
    try
    {
        frame = cv_bridge::toCvCopy(msg, enc::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    // Split into B, G, R planes; detect blobs on the red plane only.
    cv::split(frame->image, color_planes);
    blobDetector->detect( color_planes[2], keyPoints);

    // Visualise the detections on top of the original frame.
    drawKeypoints( frame->image, keyPoints, out);
    cv::imshow(WINDOW, out);
    cv::waitKey(3);

    image_pub_.publish(frame->toImageMsg());
}
int main(int argc, char *argv[]) { QCoreApplication a(argc, argv); clock_t wait; VideoCapture cap(0); // open the video file for reading if ( !cap.isOpened() ) // if not success, exit program { cout << "Cannot open the video file" << endl; return -1; } //cap.set(CV_CAP_PROP_POS_MSEC, 300); //start the video at 300ms // double fps = cap.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video // cout << "Frame per seconds : " << fps << endl; namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo" wait=clock(); int counter=1; Mat static_img; cap>>static_img; // cvtColor(static_img, static_img, CV_BGR2GRAY); //create 50 images: Mat img1,img2,img3,img4,img5,img6,img7,img8,img9,img10,result; img1=img2=img3=img4=img5=img6=img7=img8=img9=img10=result=static_img; while(1) { Mat frame; if (counter==100) { counter=1;} counter=counter+1; cout<<"counter:"<<counter<<"\n"; bool bSuccess = cap.read(frame); // read a new frame from video // cvtColor(frame, frame, CV_BGR2GRAY); if (!bSuccess) //if not success, break loop { cout << "Cannot read the frame from video file" << endl; break; } imshow("MyVideo", frame); //show the frame in "MyVideo" window if(waitKey(30) == 27) //wait for 'esc' key press for 30 ms. 
If 'esc' key is pressed, break loop { cout << "esc key is pressed by user" << endl; break; } if (counter==10) { img1=frame; } if (counter==20) { img2=frame; } if (counter==30) { img3=frame; } if (counter==40) { img4=frame; } if (counter==50) { img5=frame; } if (counter==60) { img6=frame; } if (counter==70) { img7=frame; } if (counter==80) { img8=frame; } if (counter==90) { img9=frame; } if (counter==99) { img10=frame; } result=(img1*0.1+img2*0.1+img3*0.1+img4*0.1+img5*0.1+img6*0.1+img7*0.1+img8*0.1+img9*0.1+img10*0.1); Mat resultg,frameg,difference,binaryim; cvtColor(result, resultg, CV_BGR2GRAY); cvtColor(frame, frameg, CV_BGR2GRAY); cv::absdiff(frameg,resultg,difference); cv::threshold(difference,binaryim,20,255,CV_THRESH_BINARY_INV); // set threshold to ignore small differences you can also use inrange function // Set up the detector with default parameters. SimpleBlobDetector detector; // Detect blobs. std::vector<KeyPoint> keypoints; detector.detect( binaryim, keypoints); // Draw detected blobs as red circles. // DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures the size of the circle corresponds to the size of blob Mat im_with_keypoints; drawKeypoints( frame, keypoints, im_with_keypoints, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS ); // Show blobs imshow("MyVideo3",result); //show the frame in "MyVideo" window imshow("difference image",difference); //show the frame in "MyVideo" window imshow("binary image",binaryim); //show the frame in "MyVideo" window imshow("keypoints", im_with_keypoints ); } return a.exec(); }
// Interactive HSV range explorer: for each image in `files`, show the image
// in window "f1" with six HSV-range trackbars, and show the thresholded
// mask (with blob keypoints and some demo drawing overlaid) in window "f2".
// Press 'q'/'Q' to quit, any other key to advance to the next image.
// NOTE(review): relies on helpers not visible here — display_size(),
// fit_into(), `files`, and the `callback`/add_callback() trackbar plumbing;
// their exact semantics are assumed from usage.
int main( int argc, char** argv ){
    /*if(argc!=2){ std::cerr<<"no filename specified\n"; exit(1); } const char* imageName = argv[1]; Mat image; image = imread( imageName, 1 ); if( argc != 2 || !image.data ){ printf( " No image data \n " ); return -1; } Mat gray_image; cvtColor( image, gray_image, CV_BGR2GRAY );*/
    //imwrite( "../../images/Gray_Image.jpg", gray_image );
    //auto out=imwrite( "Gray_Image.jpg", gray_image );
    //std::cout<<"out:"<<out<<"\n";
    //cout<<"chk:"<<checksum(image)<<"\n";
    //namedWindow( imageName, CV_WINDOW_AUTOSIZE );
    //namedWindow( "Gray image", CV_WINDOW_AUTOSIZE );
    //namedWindow(imageName,WINDOW_NORMAL);
    //namedWindow("Gray image",WINDOW_NORMAL);
    //imshow( imageName, image );
    //imshow( "Gray image", gray_image );
    //waitKey(0);

    // Lay out two resizable windows: "f1" bottom-right quarter of the
    // display, "f2" top-right quarter.
    namedWindow("f1",0);
    auto d=display_size();
    //take lower right 1/4 of window
    auto quarter_size=[&](string name){ resizeWindow(name,d.width/2,d.height/2); };
    quarter_size("f1");
    moveWindow("f1",d.width/2,d.height/2);
    namedWindow("f2",0);
    quarter_size("f2");
    moveWindow("f2",d.width/2,0);

    // HSV threshold state shared with the trackbar callbacks below.
    // Initial hue window 77..149 — presumably tuned for a blue-ish target.
    int hue_lower=77,hue_upper=149;
    int sat_min=0,sat_max=255;
    int lum_min=0,lum_max=255;

    for(auto file:files){
        Mat in;
        in=imread(file,1);
        if(!in.data){
            cerr<<"Could not read image:"<<file<<"\n";
            return 1;
        }
        // Shrink the image to fit a quarter of the display (shadows the
        // outer `d` intentionally).
        auto d=display_size();
        resize(in,in,fit_into(in.size(),d/2));//approx right
        //Mat after;
        //cvtColor(in,after,CV_BGR2GRAY);
        Mat hsv_im;
        cvtColor(in,hsv_im,COLOR_BGR2HSV);
        Mat lower_red_hue_range,upper_red_hue_range,blue;

        // Re-threshold with the current trackbar values and redraw "f2".
        // Captures everything by reference, so it always sees the latest
        // slider state; invoked initially and from every trackbar change.
        auto run1=[&](){
            //inRange(hsv_im,Scalar(0,100,100),Scalar(10,255,255),lower_red_hue_range);
            //inRange(hsv_im,Scalar(160,100,100),Scalar(179,255,255),upper_red_hue_range);
            //inRange(hsv_im,Scalar(100,0,10),Scalar(179,255,150),blue);
            //inRange(hsv_im,Scalar(hue_lower,0,0),Scalar(hue_upper,255,100),blue);
            inRange(
                hsv_im,
                Scalar(hue_lower,sat_min,lum_min),
                Scalar(hue_upper,sat_max,lum_max),
                blue
            );
            //blur( blue, blue, Size(3,3) );
            //Mat contours,heirarchy;
            //OutputArrayOfArrays contours;
            //int mode=CV_RETR_LIST,method=CV_CHAIN_APPROX_NONE;
            //findContours(blue,contours,mode,method);
            Mat colorized;
            cvtColor(blue,colorized,CV_GRAY2RGB);//seems like this is actually going to bgr.

            // Blob-detect on the mask and overlay rich keypoints.
            SimpleBlobDetector detector;
            vector<KeyPoint> keypoints;
            detector.detect(blue,keypoints);
            Mat im_with_keypoints;
            drawKeypoints(blue,keypoints,im_with_keypoints,Scalar{0,0,255},DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
            colorized=im_with_keypoints;

            // Demo drawing primitives on the output (circle, line, text).
            static const auto RED=Scalar{0,0,255};
            circle(colorized,{200,200},100,RED);
            line(colorized,{200,200},{400,300},RED,10);
            static const auto BLUE=Scalar{255,50,50};
            putText(colorized,"This is some text",{10,350},FONT_HERSHEY_DUPLEX,1,BLUE);
            imshow("f2",colorized);
        };
        run1();
        //int max_thresh=255;
        /*auto thresh_callback=[&](int,void*){ run1(); };*/
        //current_callback=thresh_callback;
        //createTrackbar( "hue min", "f1", &hue_lower, max_thresh,callback,add_callback(thresh_callback));

        // Single shared trackbar handler: any slider change re-runs run1().
        // (Despite the name, this handles every slider, not just hue max.)
        auto hue_max=[&](int,void*){ run1(); };
        //createTrackbar("hue max","f1",&hue_upper,255,callback,add_callback(hue_max));

        // Register one 0..255 trackbar per HSV bound on window "f1".
        // NOTE(review): these lambdas capture locals of this loop iteration
        // by reference; assumes add_callback/createTrackbar do not retain
        // them past the waitKey(0) below — verify.
        auto a=[&](string name,int* value){
            createTrackbar(name,"f1",value,255,callback,add_callback(hue_max));
        };
        a("hue min",&hue_lower);
        a("hue max",&hue_upper);
        a("sat min",&sat_min);
        a("sat max",&sat_max);
        a("lum min",&lum_min);
        a("lum max",&lum_max);

        imshow("f1",in);
        quarter_size("f1");
        //imshow("f2",hsv_im);
        //imshow("f2",lower_red_hue_range);//seems to pick out the flag well.
        //imshow("f2",upper_red_hue_range);//seems to get the rest of the red
        //waitKey(100);

        // Block until a key press; 'q'/'Q' quits, anything else advances.
        char c=waitKey(0);
        //cout<<"got:"<<c<<"\n";
        if(c=='q' || c=='Q') return 0;
    }
    return 0;
}
int main(int argc, char** argv) { if (argc != 3) { help(argv); return 1; } // Verify the input values //VideoCapture cap(argv[1]); // open the passed video VideoCapture cap; // Futile attempt to try differetn codecs //cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('D', 'I', 'V', '4')); //cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('D', 'A', 'V', 'C')); //cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('3', 'I', 'V', '2')); //cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('3', 'I', 'V', 'X')); //cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('A', 'V', 'C', '1')); cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('H', '2', '6', '4')); cap.open(argv[1]); if (!cap.isOpened()) { // check if we succeeded cout << "\nCan not open video file '" << argv[1] << "'" << endl; return -1; } else { cout << "Video " << argv[1] << endl; cout << " width =" << cap.get(CV_CAP_PROP_FRAME_WIDTH) << endl; cout << " height =" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl; cout << " nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl; cout << " fps =" << cap.get(CV_CAP_PROP_FPS) << endl; } // Load the trail of locations location_train locations; if (locations.load(argv[2]) != location_train::error_code::no_error) { cout << "Cannot load the location file '" << argv[2] << "'" << endl; return -1; } // do the simple sanity check if (locations.getCount() != cap.get(CV_CAP_PROP_FRAME_COUNT)) { cout << "Data points don't match." << endl; cout << " n frames =" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl; cout << " n locations=" << locations.getCount() << endl; return -1; } location_train::point_t ul{ 0,0 }; location_train::point_t lr{ (unsigned long)cap.get(CV_CAP_PROP_FRAME_WIDTH),(unsigned long)cap.get(CV_CAP_PROP_FRAME_HEIGHT) }; if (locations.verify(ul, lr) != location_train::error_code::no_error) { cout << "Data points don't fit into video space." << endl; return -1; } // Set up the detector with default parameters. 
SimpleBlobDetector detector; auto loc_index = 0; auto fps = cap.get(CV_CAP_PROP_FPS); // Process frame by frame for (;;) { Mat frame; cap >> frame; // get a new frame from the file double frame_time = loc_index / fps; // Detect blobs. std::vector<KeyPoint> keypoints; detector.detect(frame, keypoints); // No need to check the range since we already verified that the number of locations // is the same as the number of frames auto location = locations[loc_index]; loc_index++; if (keypoints.size() == 0) { cout << "Error: No objects found at time: " << frame_time << endl; } bool located = false; for ( auto key : keypoints ) { // The found blob should be at least 3x3 if (key.size > 3) { if (inPoint(key.pt, key.size, location)) { located = true; break; } } } if (!located) { cout << "Error: No objects at time: " << frame_time << "located at expected position" << endl; } } // the video file will be deinitialized automatically in VideoCapture destructor return 0; }