int main() { // Image & hsvImage //IplImage *hsv; // Video Capture CvCapture *capture; // Key for keyboard event char key; // Number of tracked pixels int nbPixels; // Next position of the object we overlay CvPoint objectNextPos; // Initialize the video Capture (200 => CV_CAP_V4L2) capture = cvCreateCameraCapture(200); // Check if the capture is ok if (!capture) { printf("Can't initialize the video capture.\n"); return -1; } // Create the windows cvNamedWindow("Test Color Tracking", CV_WINDOW_AUTOSIZE); cvNamedWindow("Test Mask", CV_WINDOW_AUTOSIZE); cvMoveWindow("Test Color Tracking", 0, 100); cvMoveWindow("Test Mask", 650, 100); // Mouse event to select the tracked color on the original image cvSetMouseCallback("Test Color Tracking", getObjectColor); // While we don't want to quit while(key != 'Q' && key != 'q') { // We get the current image image = cvQueryFrame(capture); // If there is no image, we exit the loop if(!image) continue; objectNextPos = binarisation(image, &nbPixels); addObjectToVideo(image, objectNextPos, nbPixels); // We wait 10 ms key = cvWaitKey(10); } // Destroy the windows we have created cvDestroyWindow("Test Color Tracking"); cvDestroyWindow("Test Mask"); // Destroy the capture cvReleaseCapture(&capture); return 0; }
/**
 * Locate the tracked colour in the given frame and overlay a marker
 * at the detected position.
 *
 * @param image  Frame to analyse and draw on (modified in place).
 */
void ColourToTrack::pinpoint(IplImage* image) {
    // Binarise the frame to find where the colour is, then draw the
    // overlay at that spot.
    const auto detected = binarise(image);
    addObjectToVideo(image, detected);
}
/** * The "Main" of the class. This is the master function that will * continuously loop and scan/process images read in from the camera. */ int ColorTracking::RunColorTracking(bool debug) { // Initialize capturing live feed from the camera CvCapture* capture = 0; capture = cvCaptureFromCAM( CV_CAP_ANY ); cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320); cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240); // Couldn't get a device? Throw an error and quit if(!capture) { fprintf( stderr, "ERROR: Frame is NULL \n"); getchar(); return -1; } // The two windows we'll be using - for debugging if (debug) { cvNamedWindow("video"); cvNamedWindow("thresh"); } // An infinite loop while(true) { // Will hold a frame captured from the camera IplImage* frame = 0; frame = cvQueryFrame(capture); // If we couldn't grab a frame... quit if(!frame) break; // Create two images and grab red and blue thresholds IplImage* imgRedThresh = GetRedThresholdedImage(frame); IplImage* imgBlueThresh = GetBlueThresholdedImage(frame); // turn the thresholded image into a binary image (white and black only) IplImage* imgRedBinary = cvCreateImage(cvGetSize(frame), 8, 1); cvThreshold(imgRedThresh, imgRedBinary, 10, 255, CV_THRESH_BINARY); // Setup for getting contours for red only CvSeq* redContours; CvMemStorage *redStorage = cvCreateMemStorage(0); // Smooth the image (prevent any edge issues) IplImage* imgRedSmooth = cvCreateImage(cvGetSize(frame), 8, 1); cvSmooth(imgRedBinary, imgRedSmooth, CV_MEDIAN, 7, 3, 0, 0); cvThreshold(imgRedSmooth, imgRedSmooth, 10, 255, CV_THRESH_BINARY); // gets amount of "white" space found - debugging only //int white_count = cvCountNonZero(imgRedThresh); // Find contours of smooth image, then display contours on regular image // Find contours returns the number of contours found int redCount = cvFindContours(imgRedSmooth, redStorage, &redContours); validRedPoints = redCount; int redPointXAvg = 0; int redPointYAvg = 0; // iterate through all red 
contours and draw a box around them // and draw a dot in the middle of the found object for (int i = 0; i < redCount; i++) { CvRect rect = cvBoundingRect(redContours, 0); int x = rect.x, y = rect.y, h = rect.height, w = rect.width; if (w < 10000 && h < 10000){ // need to dial in this for our LEDs cvRectangle(frame, cvPoint(x, y), cvPoint(x + w, y + h), CV_RGB(0, 255, 0), 1, CV_AA, 0); addObjectToVideo(frame, cvPoint(x + w/2, y + h/2), CV_RGB(255, 0, 0), 2); redPointXAvg += x + w/2; redPointYAvg += y + h/2; //validRedPoints += 1; } redContours = redContours->h_next; } if (validRedPoints > 0) { redPointXAvg = redPointXAvg / validRedPoints; redPointYAvg = redPointYAvg / validRedPoints; addObjectToVideo(frame, cvPoint(redPointXAvg, redPointYAvg), CV_RGB(255, 0, 255), 2); redDistance = drawWidthDiff(frame, cvPoint(redPointXAvg, redPointYAvg), middle); } else { redDistance = 0; } IplImage* imgBlueBinary = cvCreateImage(cvGetSize(frame), 8, 1); cvThreshold(imgBlueThresh, imgBlueBinary, 10, 255, CV_THRESH_BINARY); // Setup for getting contours for red only CvSeq* blueContours; CvMemStorage *blueStorage = cvCreateMemStorage(0); // Smooth the image (prevent any edge issues) IplImage* imgBlueSmooth = cvCreateImage(cvGetSize(frame), 8, 1); cvSmooth(imgBlueBinary, imgBlueSmooth, CV_MEDIAN, 7, 3, 0, 0); cvThreshold(imgBlueSmooth, imgBlueSmooth, 10, 255, CV_THRESH_BINARY); int blueCount = cvFindContours(imgBlueSmooth, blueStorage, &blueContours); validBluePoints = blueCount; int bluePointXAvg = 0; int bluePointYAvg = 0; // iterate through all blue contours and draw a box around them // and draw a dot in the middle of the found object for (int i = 0; i < blueCount; i++) { CvRect rect = cvBoundingRect(blueContours, 0); int x = rect.x, y = rect.y, h = rect.height, w = rect.width; if (w < 10000 && h < 10000){ // need to dial in this for our LEDs cvRectangle(frame, cvPoint(x, y), cvPoint(x + w, y + h), CV_RGB(0, 255, 0), 1, CV_AA, 0); addObjectToVideo(frame, cvPoint(x + w/2, y + h/2), 
CV_RGB(0, 0, 255), 2); bluePointXAvg += x + w/2; bluePointYAvg += y + h/2; //validBluePoints += 1; } blueContours = blueContours->h_next; } // if we have found valid points, calculate the average between them // and draw a circle at that point. Then draw a line from the middle // to that point to use as a send back value if (validBluePoints > 0) { bluePointXAvg = bluePointXAvg / validBluePoints; bluePointYAvg = bluePointYAvg / validBluePoints; addObjectToVideo(frame, cvPoint(bluePointXAvg, bluePointYAvg), CV_RGB(255, 0, 255), 2); blueDistance = drawWidthDiff(frame, cvPoint(bluePointXAvg, bluePointYAvg), middle); } else { blueDistance = 0; } // middle dot - do this at the end so it shows up in front for debugging addObjectToVideo(frame, middle, CV_RGB(0, 0, 0), 3); // Add the two thresholded images into one - for viewing cvAdd(imgRedSmooth, imgBlueSmooth, imgRedSmooth); // Choose the images we wish to show on the windows // This is for debugging only if (debug) { cvShowImage("thresh", imgRedSmooth); cvShowImage("video", frame); } // Wait for a keypress int c = cvWaitKey(10); // if q or Q is pressed, quit if(c == 81 || c == 113) { // If pressed, break out of the loop break; } // if Space is pressed, tell us the number of contours else if (c == 32) { std::cout << "Red Contours found: " << getRedCount() << std::endl; std::cout << "Blue Contours found: " << getBlueCount() << std::endl; } // if P or p is pressed, save a screen shot else if (c == 80 || c == 112) { cvSaveImage("test.jpg", frame); std::cout << "Screen shot taken." 
<< std::endl; } // if D or d is pressed, tell us distance to blue else if (c == 68 || c == 100) { std::cout << "Distance to red: " << getRedTurn() << std::endl; std::cout << "Distance to blue: " << getBlueTurn() << std::endl; } // Release all images and release the memory storage for contours // this prevents memory leaks cvReleaseImage(&imgRedThresh); cvReleaseImage(&imgRedBinary); cvReleaseImage(&imgRedSmooth); cvReleaseMemStorage(&redStorage); cvReleaseImage(&imgBlueThresh); cvReleaseImage(&imgBlueBinary); cvReleaseImage(&imgBlueSmooth); cvReleaseMemStorage(&blueStorage); //redDistance = 0; //blueDistance = 0; //validRedPoints = 0; //validBluePoints = 0; } // We're done using the camera. Other applications can now use it cvReleaseCapture(&capture); return 0; }