void THISCLASS::OnStep() { std::vector<Particle> rejectedparticles; // Get and check input image IplImage *inputimage = cvCloneImage(mCore->mDataStructureImageBinary.mImage); IplImage *outputImage = mCore->mDataStructureImageBinary.mImage; //mCore->mDataStructureImageBinary.mImage; if (! inputimage) { AddError(wxT("No input image.")); return; } if (inputimage->nChannels != 1) { AddError(wxT("The input image is not a grayscale image.")); return; } cvZero(outputImage); // We clear the ouput vector mParticles.clear(); // Initialization Particle tmpParticle; // Used to put the calculated value in memory CvMoments moments; // Used to calculate the moments std::vector<Particle>::iterator j; // Iterator used to stock the particles by size // We allocate memory to extract the contours from the binary image CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* contour = 0; // Init blob extraxtion CvContourScanner blobs = cvStartFindContours(inputimage, storage, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE); // This is used to correct the position in case of ROI CvRect rectROI; if (inputimage->roi != NULL) { rectROI = cvGetImageROI(inputimage); } else { rectROI.x = 0; rectROI.y = 0; } while ((contour = cvFindNextContour(blobs)) != NULL) { // Computing the moments cvMoments(contour, &moments); // Computing particle area tmpParticle.mArea = moments.m00; tmpParticle.mCenter.x = (float)(rectROI.x + (moments.m10 / moments.m00 + 0.5)); // moments using Green theorem tmpParticle.mCenter.y = (float)(rectROI.y + (moments.m01 / moments.m00 + 0.5)); // m10 = x direction, m01 = y direction, m00 = area as edicted in theorem // Selection based on area if ((mAreaSelection == false) || ((tmpParticle.mArea <= mMaxArea) && (tmpParticle.mArea >= mMinArea))) { tmpParticle.mCompactness = GetContourCompactness(contour); if ((mCompactnessSelection == false) || ((tmpParticle.mCompactness > mMinCompactness) && (tmpParticle.mCompactness < mMaxCompactness))) { double tempValue = 
cvGetCentralMoment(&moments, 2, 0) - cvGetCentralMoment(&moments, 0, 2); tmpParticle.mOrientation = atan(2 * cvGetCentralMoment(&moments, 1, 1) / (tempValue + sqrt(tempValue * tempValue + 4 * cvGetCentralMoment(&moments, 1, 1) * cvGetCentralMoment(&moments, 1, 1)))); if ((mOrientationSelection == false) || (((tmpParticle.mOrientation > mMinOrientation) && (tmpParticle.mOrientation < mMaxOrientation)) || ((tmpParticle.mOrientation > mMinOrientation + PI) && (tmpParticle.mOrientation < mMaxOrientation + PI)) || ((tmpParticle.mOrientation > mMinOrientation - PI) && (tmpParticle.mOrientation < mMaxOrientation - PI)))) { cvDrawContours(outputImage, contour, cvScalarAll(255), cvScalarAll(255), 0, CV_FILLED); // Check if we have already enough particles if (mParticles.size() == mMaxNumber) { // If the particle is bigger than the smallest stored particle, store it, else do nothing if (tmpParticle.mArea > mParticles.back().mArea) { // Find the place were it must be inserted, sorted by size for (j = mParticles.begin(); (j != mParticles.end()) && (tmpParticle.mArea < (*j).mArea); j++); // Fill unused values tmpParticle.mID = -1; tmpParticle.mIDCovariance = -1; // Insert the particle mParticles.insert(j, tmpParticle); // Remove the smallest one mParticles.pop_back(); } } else { // The particle is added at the correct place // Find the place were it must be inserted, sorted by size for (j = mParticles.begin(); (j != mParticles.end()) && (tmpParticle.mArea < (*j).mArea); j++); // Fill unused values tmpParticle.mID = -1; tmpParticle.mIDCovariance = -1; // Insert the particle mParticles.insert(j, tmpParticle); } } } } else { rejectedparticles.push_back(tmpParticle); } cvRelease((void**)&contour); } contour = cvEndFindContours(&blobs); // If we need to display the particles /* if(trackingimg->GetDisplay()) { for(j=rejectedparticles.begin();j!=rejectedparticles.end();j++) { trackingimg->DrawCircle(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(255,0,0)); } 
for(j=particles.begin();j!=particles.end();j++) { trackingimg->DrawCircle(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(0,255,0)); trackingimg->Cover(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(255,0,0),2); } } */ cvReleaseImage(&inputimage); cvRelease((void**)&contour); cvReleaseMemStorage(&storage); // Set these particles mCore->mDataStructureParticles.mParticles = &mParticles; // Let the DisplayImage know about our image DisplayEditor de(&mDisplayOutput); if (de.IsActive()) { de.SetParticles(&mParticles); de.SetMainImage(mCore->mDataStructureImageBinary.mImage); } }
/// Thin forwarding wrapper around the legacy OpenCV cvGetCentralMoment().
/// Returns the central moment mu(xOrder, yOrder) of the given moment state.
double cveGetCentralMoment(CvMoments* moments, int xOrder, int yOrder) {
	const double mu = cvGetCentralMoment(moments, xOrder, yOrder);
	return mu;
}
int main() { raspicam::RaspiCam_Cv Camera; // Camera Object cv::Mat frame; // Set camera params Camera.set(CV_CAP_PROP_FORMAT, CV_8UC3); // For color Camera.set(CV_CAP_PROP_FRAME_WIDTH, 640); Camera.set(CV_CAP_PROP_FRAME_HEIGHT, 480); // Open camera std::cout << "Opening camera...\n"; if (! Camera.open()) { std::cerr << "Error opening camera!\n"; return -1; } // The two windows we'll be using cvNamedWindow("video"); cvNamedWindow("thresh"); cvMoveWindow("video", 0, 0); cvMoveWindow("thresh", 240, 0); int thresh_h[] {0, 18}; int thresh_s[] {160, 255}; int thresh_v[] {144, 255}; const int max_thresh(255); cv::createTrackbar(" H min:", "thresh", &thresh_h[0], max_thresh, nullptr); cv::createTrackbar(" H max:", "thresh", &thresh_h[1], max_thresh, nullptr); cv::createTrackbar(" S min:", "thresh", &thresh_s[0], max_thresh, nullptr); cv::createTrackbar(" S max:", "thresh", &thresh_s[1], max_thresh, nullptr); cv::createTrackbar(" V min:", "thresh", &thresh_v[0], max_thresh, nullptr); cv::createTrackbar(" V max:", "thresh", &thresh_v[1], max_thresh, nullptr); // This image holds the "scribble" data, the tracked positions of the ball IplImage* imgScribble = NULL; cv::Mat frame_mat; while (true) { if (! 
Camera.grab()) { break; } Camera.retrieve(frame_mat); // Will hold a frame captured from the camera IplImage frame = frame_mat; // If this is the first frame, we need to initialize it if (imgScribble == NULL) { imgScribble = cvCreateImage(cvGetSize(&frame), 8, 3); } // Holds the yellow thresholded image (yellow = white, rest = black) IplImage* imgYellowThresh = GetThresholdedImage(&frame, thresh_h, thresh_s, thresh_v); // Calculate the moments to estimate the position of the ball CvMoments moments; cvMoments(imgYellowThresh, &moments, 1); // The actual moment values double moment10 = cvGetSpatialMoment(&moments, 1, 0); double moment01 = cvGetSpatialMoment(&moments, 0, 1); double area = cvGetCentralMoment(&moments, 0, 0); // Holding the last and current ball positions static int posX = 0; static int posY = 0; int lastX = posX; int lastY = posY; posX = moment10 / area; posY = moment01 / area; // Print it out for debugging purposes printf("position (%d,%d)\n", posX, posY); // We want to draw a line only if its a valid position if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0) { // Draw a yellow line from the previous point to the current point cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5); } // Add the scribbling image and the frame... and we get a combination of the two cvAdd(&frame, imgScribble, &frame); cvCvtColor(&frame, &frame, CV_BGR2RGB); cvShowImage("video", &frame); // cvShowImage("video", imgScribble); cvShowImage("thresh", imgYellowThresh); // Wait for a keypress int c = cvWaitKey(10); if (c != -1) { // If pressed, break out of the loop break; } } return 0; }
int main() { // Initialize capturing live feed from the camera CvCapture* capture = 0; capture = cvCaptureFromCAM(1); //depending on from which camera you are Capturing // Couldn't get a device? Throw an error and quit if(!capture) { printf("Could not initialize capturing...\n"); return -1; } // The two windows we'll be using cvNamedWindow("video"); cvNamedWindow("thresh"); // This image holds the "scribble" data... // the tracked positions of the ball IplImage* imgScribble = NULL; // An infinite loop while(true) { // Will hold a frame captured from the camera IplImage* frame = 0; frame = cvQueryFrame(capture); // If we couldn't grab a frame... quit if(!frame) break; // If this is the first frame, we need to initialize it if(imgScribble == NULL) { imgScribble = cvCreateImage(cvGetSize(frame), 8, 3); } // Holds the yellow thresholded image (yellow = white, rest = black) IplImage* imgYellowThresh = GetThresholdedImage(frame); // Calculate the moments to estimate the position of the ball CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments)); cvMoments(imgYellowThresh, moments, 1); // The actual moment values double moment10 = cvGetSpatialMoment(moments, 1, 0); double moment01 = cvGetSpatialMoment(moments, 0, 1); double area = cvGetCentralMoment(moments, 0, 0); // Holding the last and current ball positions static int posX = 0; static int posY = 0; int lastX = posX; int lastY = posY; posX = moment10/area; posY = moment01/area; // Print it out for debugging purposes printf("position (%d,%d)\n", posX, posY); // We want to draw a line only if its a valid position if(lastX>0 && lastY>0 && posX>0 && posY>0) { // Draw a yellow line from the previous point to the current point cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5); } // Add the scribbling image and the frame... 
and we get a combination of the two cvAdd(frame, imgScribble, frame); cvShowImage("thresh", imgYellowThresh); cvShowImage("video", frame); // Wait for a keypress int c = cvWaitKey(10); if(c!=-1) { // If pressed, break out of the loop break; } // Release the thresholded image... we need no memory leaks.. please cvReleaseImage(&imgYellowThresh); delete moments; } // We're done using the camera. Other applications can now use it cvReleaseCapture(&capture); return 0; }
int trackObject(IplImage* imgThresh, int R, int G, int B, int player){ // Calculate the moments of 'imgThresh' CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments)); cvMoments(imgThresh, moments, 1); double moment10 = cvGetSpatialMoment(moments, 1, 0); double moment01 = cvGetSpatialMoment(moments, 0, 1); double area = cvGetCentralMoment(moments, 0, 0); // if the area<1000, I consider that the there are no object in the image and it's because of the noise, the area is not zero if(area>1000){ // Draw a yellow line from the previous point to the current point if (player ==1){ posX1 = moment10/area; posY1 = moment01/area; } if (player ==2){ posX2 = moment10/area; posY2 = moment01/area; } switch (player){ case 1: if (posY1-lastY1 < 40 && posY1-lastY1 > 100){printf ("Player%i: Abajo\n", player);caso=0;} if (posY1-lastY1 > 40 && posY1-lastY1 < 100){printf ("Player%i: Abajo\n", player);caso=-1;} if (posY1-lastY1<-40 && posY1-lastY1>-100){printf ("Player%i: Arriba\n", player);caso=1;} if (posX1-lastX1<-50 && posX1-lastX1>-100){printf ("Player%i: Derecha\n", player);caso=2;} if (posX1-lastX1>50 && posX1-lastX1<100){printf ("Player%i: Izquierda\n", player);caso=-2;} lastX1 = posX1; lastY1 = posY1; break; case 2: if (posY2-lastY2 < 40 && posY2-lastY2 > 100){printf ("Player%i: Abajo\n", player);caso=0;} if (posY2-lastY2 > 40 && posY2-lastY2 < 100){printf ("Player%i: Abajo\n", player);caso=-1;} if (posY2-lastY2<-40 && posY2-lastY2>-100){printf ("Player%i: Arriba\n", player);caso=1;} if (posX2-lastX2<-50 && posX2-lastX2>-100){printf ("Player%i: Derecha\n", player);caso=2;} if (posX2-lastX2>50 && posX2-lastX2<100){printf ("Player%i: Izquierda\n", player);caso=-2;} lastX2 = posX2; lastY2 = posY2; break; } } free(moments); return caso; }
/**
 * Second version of the tracker: classifies the motion of the thresholded
 * object since the previous call into the global direction code 'caso'
 * (-1 = down, 1 = up, 2 = right, -2 = left; unchanged if no threshold is hit).
 *
 * @param imgThresh  binary image of the tracked color
 * @param R,G,B      unused; kept for interface compatibility with callers
 * @param player     1 or 2 — selects which player's position globals to update
 * @return the global 'caso' direction code
 *
 * Operates entirely on globals: moments, moment10, moment01, area,
 * posX1/posY1/posX2/posY2 and lastX1/lastY1/lastX2/lastY2.
 */
int trackObject(IplImage* imgThresh, int R, int G, int B, int player) {
	// Moments of the binary image (binary flag = 1): area and centroid
	cvMoments(imgThresh, moments, 1);
	moment10 = cvGetSpatialMoment(moments, 1, 0);
	moment01 = cvGetSpatialMoment(moments, 0, 1);
	area = cvGetCentralMoment(moments, 0, 0);

	// Areas of 1000 px or less are treated as noise (no object present);
	// the area is never exactly zero because of that noise.
	if (area > 1000) {
		// Update the selected player's position, then classify the displacement.
		// The checks are deliberately independent ifs: when several fire,
		// the horizontal ones (evaluated last) win.
		if (player == 1) {
			posX1 = moment10 / area;
			posY1 = moment01 / area;
			if (posY1 - lastY1 > 20)  { caso = -1; }  // moved down
			if (posY1 - lastY1 < -20) { caso = 1; }   // moved up
			if (posX1 - lastX1 < -50) { caso = 2; }   // moved right
			if (posX1 - lastX1 > 50)  { caso = -2; }  // moved left
		} else if (player == 2) {
			posX2 = moment10 / area;
			posY2 = moment01 / area;
			if (posY2 - lastY2 > 20)  { caso = -1; }
			if (posY2 - lastY2 < -20) { caso = 1; }
			if (posX2 - lastX2 < -50) { caso = 2; }
			if (posX2 - lastX2 > 50)  { caso = -2; }
		}

		// Reference positions of BOTH players are refreshed together here
		lastX1 = posX1;
		lastY1 = posY1;
		lastX2 = posX2;
		lastY2 = posY2;
	}
	return caso;
}  // end of tracker function #2