IplImage *CannyFilterPass::operator()(IplImage *source) {
    // Check the image type: Canny only works on single-channel images.
    if (source->nChannels != 1) {
        emit error(QObject::tr("The Canny filter only supports single-channel\n") +
                   QObject::tr(" images, so you must add \"Grayscale\"\n") +
                   QObject::tr(" at the top of the list before applying\n") +
                   QObject::tr(" this filter."));
        return source;
    }
    cvCanny(source, source, threshold1, threshold2, apertureSize);
    return source;
}
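// Since the pass refuses multi-channel input, upstream code has to hand it a
// single-channel image first. A minimal sketch of that precondition, assuming
// a BGR `frame` and placeholder thresholds (none of these names come from the
// filter class itself):
IplImage *gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvCvtColor(frame, gray, CV_BGR2GRAY);   // what the "Grayscale" list entry does
cvCanny(gray, gray, 50, 150, 3);        // placeholder thresholds/aperture
cvReleaseImage(&gray);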
int ExpandEdge(const IplImage* pSourceImage, IplImage* pEdge, int nWidth) {
    if (pSourceImage == NULL || pEdge == NULL) {
        printf("ExpandEdge input image is NULL!\n");
        return -1;
    }
    CvSize iSourceSize = cvGetSize(pSourceImage);
    CvSize iEdgeSize = cvGetSize(pEdge);
    if (iSourceSize.height != iEdgeSize.height || iSourceSize.width != iEdgeSize.width) {
        printf("ExpandEdge input image size error!\n");
        return -1;
    }
    cvCanny(pSourceImage, pEdge, 50, 150, 3);
    cvSmooth(pEdge, pEdge, CV_BLUR, nWidth, nWidth, 0, 0);
    cvThreshold(pEdge, pEdge, 0, 200, CV_THRESH_BINARY);
    return 1;
}
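// A short usage sketch for ExpandEdge: detect edges, then blur and
// re-threshold them so each edge is widened to roughly nWidth pixels.
// The filenames here are illustrative, not from the original code.
IplImage *src  = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE); // assumed file
IplImage *edge = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
if (ExpandEdge(src, edge, 5) == 1)   // nWidth = 5 thickens edges to ~5 px
    cvSaveImage("edges.png", edge);
cvReleaseImage(&edge);
cvReleaseImage(&src);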
void FeatureDetector::processImage(Image* input, Image* output) {
    printf("Processing\n");
    raw = (IplImage*)(*input);
    cvCopyImage(raw, image);
    // copyChannel(image, grayscale, 2); // Let's try just copying the red channel
    cvCvtColor(image, grayscale, CV_BGR2GRAY);
    cvCanny(grayscale, edgeDetected, 50, 100, 3);

    // The two floats are the minimum quality of features and the minimum
    // euclidean distance between features. The NULL says use the entire image.
    int numFeatures = maxFeatures;
    cvGoodFeaturesToTrack(edgeDetected, eigImage, tempImage, features,
                          &numFeatures, .15, 30, NULL);

    singleChannelToThreeChannel(edgeDetected, image);

    // Mark each feature with an X: draw both diagonals of a 5x5 box around it.
    for (int i = 0; i < numFeatures; i++) {
        CvPoint topLeft;
        CvPoint bottomRight;
        topLeft.x = (int)features[i].x - 2;
        topLeft.y = (int)features[i].y + 2;
        bottomRight.x = (int)features[i].x + 2;
        bottomRight.y = (int)features[i].y - 2;
        cvLine(image, topLeft, bottomRight, CV_RGB(0,0,255), 1, CV_AA, 0);
        // Swap the y coordinates to draw the other diagonal.
        int temp = topLeft.y;
        topLeft.y = bottomRight.y;
        bottomRight.y = temp;
        cvLine(image, topLeft, bottomRight, CV_RGB(0,0,255), 1, CV_AA, 0);
    }
    if (output) {
        OpenCVImage temp(image, false);
        output->copyFrom(&temp);
    }
}
void CPianoHand::SearchForHand(CIplImage *image) {
    int x, y;
    float highMatch = 0;
    int highValX = 0, highValY = 0, highSplitType = 0;
    float currMatch;
    int splitType;
    int i, j;

    CIplImage backupImage;
    backupImage.initialize(IMAGE_WIDTH, IMAGE_HEIGHT, 8);
    backupImage.copy(image);

    // Search outward in concentric rings around the current centre,
    // scanning the top/bottom rows and the left/right columns of each ring.
    for (i = 0; i < 10; i++) {
        for (j = 0; j < ((((i*2)+1)*2) + (((i*2)-1)*2)); j++) {
            // Top row
            for (x = -i; x <= i; x++) {
                y = -i;
                currMatch = CheckForHand(image, x, y, &splitType);
                if (currMatch > highMatch) {
                    highMatch = currMatch; highValX = x; highValY = y;
                    highSplitType = splitType;
                }
            }
            // Bottom row
            for (x = -i; x <= i; x++) {
                y = i;
                currMatch = CheckForHand(image, x, y, &splitType);
                if (currMatch > highMatch) {
                    highMatch = currMatch; highValX = x; highValY = y;
                    highSplitType = splitType;
                }
            }
            // Left column
            for (y = -i; y <= i; y++) {
                x = -i;
                currMatch = CheckForHand(image, x, y, &splitType);
                if (currMatch > highMatch) {
                    highMatch = currMatch; highValX = x; highValY = y;
                    highSplitType = splitType;
                }
            }
            // Right column
            for (y = -i; y <= i; y++) {
                x = i;
                currMatch = CheckForHand(image, x, y, &splitType);
                if (currMatch > highMatch) {
                    highMatch = currMatch; highValX = x; highValY = y;
                    highSplitType = splitType;
                }
            }
        }
    }

    if (highMatch > 0) {
        int x1, y1, x2, y2;
        cvCopy(backupImage.getIplImage(), image->getIplImage(), NULL);
        computeBlob(&backupImage, &backupImage, m_center.x + highValX,
                    m_center.y + highValY, 100, &x1, &y1, &x2, &y2);

        // Build the candidate hand from the blob's bounding box, anchored
        // according to the split type. (Plain construction; the original
        // `*(new CPianoHand(...))` leaked each temporary.)
        CPianoHand tempHand;
        if (highSplitType == 0)       // Centre reference
            tempHand = CPianoHand(0, x1, y1, x2, y2);
        else if (highSplitType == 1)  // Top-left reference
            tempHand = CPianoHand(0, x1, y1, x1 + m_boundingBox.width, y1 + m_boundingBox.height);
        else if (highSplitType == 2)  // Bottom-right reference
            tempHand = CPianoHand(0, x2 - m_boundingBox.width, y2 - m_boundingBox.height, x2, y2);
        else                          // Centre reference, without much width change
            tempHand = CPianoHand(0, x1, y1, x1 + m_boundingBox.width, y2);
        UpdateWithHand(&tempHand);

        // Create the hands mask image from the bounding box.
        for (x = 0; x < IMAGE_WIDTH; x++) {
            for (y = 0; y < IMAGE_HEIGHT; y++) {
                m_handsImage.getIplImage()->imageData[y*IMAGE_WIDTH+x] = 0;
                m_traceImage.getIplImage()->imageData[y*IMAGE_WIDTH+x] = 0;
                if (x >= tempHand.m_boundingBox.x && x < (tempHand.m_boundingBox.x + tempHand.m_boundingBox.width)) {
                    if (y >= tempHand.m_boundingBox.y && y < (tempHand.m_boundingBox.y + tempHand.m_boundingBox.height)) {
                        m_handsImage.getIplImage()->imageData[y*IMAGE_WIDTH+x] =
                            (unsigned char)image->getIplImage()->imageData[y*IMAGE_WIDTH+x];
                    }
                }
            }
        }

        // Close small gaps in the mask, then extract the hand's edges.
        CIplImage tempImage;
        tempImage.initialize(IMAGE_WIDTH, IMAGE_HEIGHT, 8);
        cvDilate(m_handsImage.getIplImage(), m_edgeImage.getIplImage(), NULL, 1);
        cvErode(m_edgeImage.getIplImage(), tempImage.getIplImage(), NULL, 1);
        cvCanny(tempImage.getIplImage(), m_edgeImage.getIplImage(), 0, 1, 3);
        /*DrawBox(m_imb_edgeDetectedImage.getIplImage(), x1, y1, x2, y2, 1);
        (*numHands)++;*/
    }
}
void rotate(IplImage *img) {
    CvRect rect;
    IplImage *imgLine;
    rect.x = GlobalGandhiji.x + GlobalGandhiji.width;
    rect.y = GlobalGandhiji.y - 5;
    rect.width = (int)((GlobalGandhiji.width) - 5);
    rect.height = GlobalGandhiji.height + 15;
    if (GlobalGandhiji.matchval != -1
        && rect.x > 0 && rect.y > 0
        && rect.y + rect.height <= img->height
        && rect.x + rect.width <= img->width) {
        imgLine = cropRectangle(img, rect);
        cvNamedWindow("imgLine", 1);
        cvShowImage("imgLine", imgLine);

        IplImage* src1 = cvCreateImage(cvGetSize(imgLine), 8, 1);
        cvCvtColor(imgLine, src1, CV_RGB2GRAY);
        IplImage* dst = cvCreateImage(cvGetSize(src1), 8, 1);
        IplImage* color_dst = cvCreateImage(cvGetSize(src1), 8, 3);
        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* lines = 0;
        int i;

        cvCanny(src1, dst, 50, 150, 3);
        //cvDilate( dst, dst, 0, 1 );
        cvNamedWindow("edgedest", 1);
        cvShowImage("edgedest", dst);
        cvCvtColor(dst, color_dst, CV_GRAY2BGR);
#if 1
        lines = cvHoughLines2(dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 30, 0, 0);
        for (i = 0; i < MIN(lines->total, 100); i++) {
            float* line = (float*)cvGetSeqElem(lines, i);
            float rho = line[0];
            float theta = line[1];
            printf("theta = %f", (theta*180/3.142));
            CvPoint pt1, pt2;
            double a = cos(theta), b = sin(theta);
            double x0 = a*rho, y0 = b*rho;
            printf("a= %f b=%f x0=%f y0=%f rho=%f\n", a, b, x0, y0, rho);
            pt1.x = cvRound(x0 + 1000*(-b));
            pt1.y = cvRound(y0 + 1000*(a));
            pt2.x = cvRound(x0 - 1000*(-b));
            pt2.y = cvRound(y0 - 1000*(a));
            printf(" x1 = %d, y1 = %d", pt1.x, pt1.y);
            printf(" x2 = %d, y2 = %d\n\n", pt2.x, pt2.y);
            //if((theta*180/3.142) < 100 && (theta*180/3.142) > 79)
            cvLine(color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8);
        }
#else
        lines = cvHoughLines2(dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 30, 0, 0);
        for (i = 0; i < lines->total; i++) {
            CvPoint* line = (CvPoint*)cvGetSeqElem(lines, i);
            cvLine(color_dst, line[0], line[1], CV_RGB(255,0,0), 3, 8);
        }
#endif
        cvNamedWindow("Hough", 1);
        cvShowImage("Hough", color_dst);

        // Release the per-call images and storage so repeated calls don't leak
        // (cvShowImage keeps its own copy of the displayed data).
        cvReleaseMemStorage(&storage);
        cvReleaseImage(&src1);
        cvReleaseImage(&dst);
        cvReleaseImage(&color_dst);
    }
}
int snaps_nav(struct snaps_nav_state_struct* nav_state, time_t *ptr_framestamptime) {
    // Get a frame from the camera (grab several to flush stale buffered frames)
    for (int i = 0; i < 6; i++) {
        img = cvQueryFrame(capture);
        if (!img) {
            fprintf(stderr, "ERROR: frame is null...\n");
            getchar();
            break;
        }
    }
    timestamp_frame(ptr_framestamptime);

    // Crop image
    cvSetImageROI(img, cvRect(1, 1, 540, 380));
    cvCopy(img, Cropped, NULL);
    // Change the color format from BGR to HSV
    cvCvtColor(Cropped, imgHSV, CV_BGR2HSV);
    // Copy the original img to be displayed in the drawn Targets/DM img
    cvCopy(Cropped, TimgDrawn, NULL);
    cvInRangeS(imgHSV, cvScalar(T_range_low,0,0,0), cvScalar(T_range_high,255,255,0), TargetsFilter);
    cvInRangeS(imgHSV, cvScalar(DM_range_low,0,0,0), cvScalar(DM_range_high,255,255,0), DMFilter);

    // Magenta marker image processing
    cvErode(TargetsFilter, TargetsFilter, 0, 1);
    cvDilate(TargetsFilter, TargetsFilter, NULL, 1);                     // Dilate image
    cvSmooth(TargetsFilter, TargetsFilter, CV_GAUSSIAN, 3, 0, 0.0, 0.0); // Smooth Target image
    // Orange target image processing
    cvErode(DMFilter, DMFilter, 0, 1);
    cvDilate(DMFilter, DMFilter, NULL, 1);                               // Dilate image
    //cvSmooth(DMFilter, DMFilter, CV_GAUSSIAN, 3, 0, 0.0, 0.0);         // Smooth DM image

    // Show filtered images
    cvShowImage("TargetsFilter", TargetsFilter);
    cvShowImage("DMFilter", DMFilter);

    // Perform Canny on the images
    cvCanny(TargetsFilter, TimgCanny, T_canny_low, T_canny_high, 3);
    cvCanny(DMFilter, DMimgCanny, DM_canny_low, DM_canny_high, 3);
    cvShowImage("TCannyImage", TimgCanny);
    cvShowImage("DMCannyImage", DMimgCanny);

    // Find and draw circles for the Targets image
    CvPoint Tpt;
    CvSeq* TimgHCirc = cvHoughCircles(TimgCanny, TcircStorage, CV_HOUGH_GRADIENT,
                                      2,            // accumulator resolution (2x the input image)
                                      T_rad_max*4,  // min dist between circles
                                      T_tol_max, T_tol_min,   // param1, param2
                                      T_rad_min, T_rad_max);  // min radius, max radius
    for (int i = 0; i < TimgHCirc->total; i++) {
        float* p = (float*)cvGetSeqElem(TimgHCirc, i); // circle coordinates
        CvPoint pt = cvPoint(cvRound(p[0]), cvRound(p[1]));
        // Draw the centre in green, the circle outline in yellow
        cvCircle(TimgDrawn, pt, 1, CV_RGB(0,255,0), -1, 8, 0);
        cvCircle(TimgDrawn, pt, cvRound(p[2]), CV_RGB(255,255,0), 2, 8, 0);
        if (i == 0) {
            Tpt = cvPoint(cvRound(p[0]), cvRound(p[1]));
            printf("Magenta Marker (x,y) - (%d, %d) \n", Tpt.x, Tpt.y);
        } else {
            printf("TM - extra point in frame; not good");
        }
    }

    // Find and draw circles for the DM image
    CvPoint DMpt;
    CvSeq* DMimgHCirc = cvHoughCircles(DMimgCanny, DMcircStorage, CV_HOUGH_GRADIENT,
                                       2, DM_rad_max*4,
                                       DM_tol_max, DM_tol_min,
                                       DM_rad_min, DM_rad_max);
    for (int i = 0; i < DMimgHCirc->total; i++) {
        float* p = (float*)cvGetSeqElem(DMimgHCirc, i);
        CvPoint pt = cvPoint(cvRound(p[0]), cvRound(p[1]));
        cvCircle(TimgDrawn, pt, 1, CV_RGB(255,0,0), -1, 8, 0);
        cvCircle(TimgDrawn, pt, cvRound(p[2]), CV_RGB(255,127,0), 2, 8, 0);
        if (i == 0) {
            DMpt = cvPoint(cvRound(p[0]), cvRound(p[1]));
            printf("Red Marker (x,y) - (%d, %d)\n", DMpt.x, DMpt.y);
        } else {
            printf("DM - extra point in frame; not good");
        }
    }

    // Draw a line between the two marker points
    cvLine(TimgDrawn, Tpt, DMpt, CV_RGB(0,255,0), 1, 8, 0);
    d = sqrt(pow(Tpt.x - DMpt.x, 2) + pow(Tpt.y - DMpt.y, 2)); // distance between points
    printf("Distance between targets %f \n", (double)d);

    // Magenta target coordinates
    int MT_pt_x = Tpt.x;
    int MT_pt_y = Tpt.y;
    // Orange target coordinates
    int OT_pt_x = DMpt.x;
    int OT_pt_y = DMpt.y;

    // Minimum and maximum of the two coordinates
    int x_min, x_max, y_min, y_max;
    if (MT_pt_x > OT_pt_x) { x_min = OT_pt_x; x_max = MT_pt_x; }
    else                   { x_min = MT_pt_x; x_max = OT_pt_x; }
    if (MT_pt_y > OT_pt_y) { y_min = OT_pt_y; y_max = MT_pt_y; }
    else                   { y_min = MT_pt_y; y_max = OT_pt_y; }

    // Centre point between the targets (CT)
    int CT_pt_x = (((x_max - x_min) / 2) + x_min);
    int CT_pt_y = (((y_max - y_min) / 2) + y_min);
    printf("Center coordinate (x, y) - (%d, %d) \n", CT_pt_x, CT_pt_y);

    // Draw the halfway point between the targets
    CvPoint CT_pt = cvPoint(cvRound(CT_pt_x), cvRound(CT_pt_y));
    cvCircle(img, CT_pt, 2, CV_RGB(255,0,0), -1, 8, 0);

    // Orientation of the marker pair
    int orientation_x = (OT_pt_x - CT_pt_x);
    int orientation_y = (CT_pt_y - OT_pt_y);
    double Theta = (((atan2(orientation_y, orientation_x)) * (180/3.14)) + 360);
    printf("Orientation %f Degrees \n", Theta);

    //cvResetImageROI(img);
    cvShowImage("TDrawnImage", TimgDrawn);
    //cvShowImage("DMDrawnImage", DMimgDrawn);

    // Clear memory for the target and DM circle finders
    // (note: this may not be necessary)
    cvClearMemStorage(TcircStorage);
    cvClearMemStorage(DMcircStorage);
    return 0;
}
//--------------------------------------------------------------
void testApp::update(){
    bool bNewFrame = false;

#ifdef _USE_LIVE_VIDEO
    vidGrabber.grabFrame();
    bNewFrame = vidGrabber.isFrameNew();
#else
    vidPlayer.idleMovie();
    bNewFrame = vidPlayer.isFrameNew();
#endif

    if (bNewFrame){
#ifdef _USE_LIVE_VIDEO
        colorImg.setFromPixels(vidGrabber.getPixels(), cw, ch);
#else
        colorImg.setFromPixels(vidPlayer.getPixels(), cw, ch);
#endif
        kx = (float) ofGetWidth() / cw;
        ky = (float) ofGetHeight() / ch;

        cvSmooth(colorImg.getCvImage(), medianImg.getCvImage(), CV_MEDIAN, medianValue, medianValue);
        medianImg.flagImageChanged();
        grayImage = medianImg;

        cvCvtColor(colorImg.getCvImage(), hsvImage.getCvImage(), CV_RGB2HSV);
        hsvImage.flagImageChanged();

        // Copy the saturation channel (COI 2) out of the HSV image.
        cvSetImageCOI(hsvImage.getCvImage(), 2);
        cvCopy(hsvImage.getCvImage(), satImage.getCvImage());
        satImage.flagImageChanged();
        cvSetImageCOI(hsvImage.getCvImage(), 0);

        //cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_BLUR, 3, 3, 0, 0);
        cvAdaptiveThreshold(grayImage.getCvImage(), trsImage.getCvImage(), 255,
                            CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, adaptiveThreshValue);
        //cvCanny(trsImage.getCvImage(), trsImage.getCvImage(), sb, sb*4, 3);
        trsImage.flagImageChanged();

        // cvSmooth(satImage.getCvImage(), satImage.getCvImage(), CV_MEDIAN, 7, 7);
        // cvSmooth( iplImage, iplImage, CV_BLUR, br, br, 0, 0 );
        // cvSmooth( iplImage, iplImage, CV_MEDIAN, 7, 7);

        cvCanny(grayImage.getCvImage(), cannyImage.getCvImage(),
                cannyThresh1Value, cannyThresh2Value, cannyApertureValue);
        cannyImage.flagImageChanged();
        //cvPyrMeanShiftFiltering(colorImg.getCvImage(), colorImg.getCvImage(), 20, 40, 2);

        if (mode == MODE_DRAWING) {
            if (draw_edges) {
#if PROBABILISTIC_LINE
                lines = cvHoughLines2(cannyImage.getCvImage(), linesStorage, CV_HOUGH_PROBABILISTIC,
                                      1, CV_PI/180, lineThreshValue, lineMinLengthValue, lineMaxGapValue);
#else
                lines = cvHoughLines2(cannyImage.getCvImage(), linesStorage, CV_HOUGH_STANDARD,
                                      1, CV_PI/180, 100, 0, 0);
#endif
            }
            if (draw_contours || draw_approx) {
                cvFindContours(cannyImage.getCvImage(), edgesStorage, &edgeContours);
                // Scale contour points from camera space to screen space.
                CvSeq* contour = edgeContours;
                while (contour != NULL) {
                    for (int j = 0; j < contour->total; j++){
                        CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
                        p1->x = p1->x * (float)kx;
                        p1->y = p1->y * (float)ky;
                    }
                    contour = contour->h_next;
                }
            }
            if (draw_fills) {
                cvFindContours(trsImage.getCvImage(), fillsStorage, &fillContours);
                CvSeq* contour = fillContours;
                while (contour != NULL) {
                    for (int j = 0; j < contour->total; j++){
                        CvPoint* p1 = CV_GET_SEQ_ELEM(CvPoint, contour, j);
                        p1->x = p1->x * (float)kx;
                        p1->y = p1->y * (float)ky;
                    }
                    contour = contour->h_next;
                }
            }
        }
    }

    // update scope
    // float* rand = new float[50];
    // for(int i=0 ;i<50; i++){
    //     rand[i] = ofRandom(-1.0,1);
    // }
    // gui->update(scope, kofxGui_Set_FloatArray, rand, sizeof(float*));
    //
    // // make 3 seconds loop
    // float f = ((ofGetElapsedTimeMillis()%3000) / 3000.0);
    // gui->update(points, kofxGui_Set_Float, &f, sizeof(float));
}
bool IrisFinderHough::Find(IplImage* image, CvRect eyeROI) {
    if (!ParametersValid())
        return false;

    if (m_sizeData.SizeChanged(eyeROI))
        PrepareImage(eyeROI);

    // Some helper images
    IplImage* imgSobelH = cvCreateImage(cvSize(eyeROI.width, eyeROI.height), IPL_DEPTH_16S, 1);
    IplImage* imgSobelV = cvCreateImage(cvSize(eyeROI.width, eyeROI.height), IPL_DEPTH_16S, 1);

    // Copy the ROI to the internal image
    ImgLib::CopyRect(image, m_eyeImg, eyeROI, cvPoint(0, 0));
    cvSobel(m_eyeImg, imgSobelH, 1, 0, 3);
    cvSobel(m_eyeImg, imgSobelV, 0, 1, 3);

    double angle;
    double dx, dy;
    double thetaRad;
    double xPrim, yPrim;
    double xsi;
    double max_e = 2.2;

    // 5-parameter ellipse accumulator: centre (x0, y0), rotation theta, semi-axes a and b
    HoughAccumulator acc(m_accPrecision);
    acc.AddParam(0, m_eyeImg->width);      // x0
    acc.AddParam(0, m_eyeImg->height);     // y0
    acc.AddParam(m_thetaMin, m_thetaMax);  // theta
    acc.AddParam(m_aMin, m_aMax);          // a
    acc.AddParam(m_bMin, m_bMax);          // b
    acc.Init();
    DOUBLEVECT indices;
    indices.resize(5);

    cvSmooth(m_eyeImg, m_eyeImg);
    // cvCanny orders the two thresholds internally, so (250, 100)
    // behaves as high = 250, low = 100.
    cvCanny(m_eyeImg, m_eyeImg, 250, 100);

    for (int y = 0; y < m_eyeImg->height; y++) {
        short* sh_row = (short*)(imgSobelH->imageData + y * imgSobelH->widthStep);
        short* sv_row = (short*)(imgSobelV->imageData + y * imgSobelV->widthStep);
        uchar* canny_row = (uchar*)(m_eyeImg->imageData + y * m_eyeImg->widthStep);
        double x0, y0;
        double a, b, theta = 0;
        for (int x = 0; x < m_eyeImg->width; x++) {
            if (canny_row[x] == 0)
                continue;
            short dX = sh_row[x];
            short dY = sv_row[x];
            if ((abs(dX) + abs(dY)) < m_minGradStrength) {
                cvLine(m_eyeImg, cvPoint(x, y), cvPoint(x, y), CV_RGB(0,0,0));
                continue;
            }
            for (a = m_aMin; a < m_aMax; a += (1 / m_accPrecision))
                for (b = m_bMin; b < m_bMax; b += (1 / m_accPrecision)) {
                    // Skip over-elongated ellipses
                    double e = a / b;
                    if (e < 1)
                        e = b / a;
                    if (e > max_e)
                        continue;
                    for (theta = m_thetaMin; theta < m_thetaMax; theta += (1 / m_accPrecision)) {
                        angle = atan2((float)dY, (float)dX);
                        thetaRad = 2 * CV_PI * theta / 360.0;
                        angle -= (thetaRad + CV_PI / 2.0);
                        xsi = tan(angle);
                        //xsi = (float) dY / (float) dX;
                        dx = -SignX(dX, dY) * a / sqrt(1 + (b * b) / (a * a * xsi * xsi));
                        dy = -SignY(dX, dY) * b / sqrt(1 + (a * a * xsi * xsi) / (b * b));
                        // Rotate by theta
                        xPrim = cos(thetaRad) * dx - sin(thetaRad) * dy;
                        yPrim = sin(thetaRad) * dx + cos(thetaRad) * dy;
                        dx = xPrim;
                        dy = yPrim;
                        x0 = x + dx;
                        y0 = y + dy;
                        indices[0] = x0;
                        indices[1] = y0;
                        indices[2] = theta;
                        indices[3] = a;
                        indices[4] = b;
                        acc.Increment(indices);
                    }
                }
        }
    }

    // The Sobel images are no longer needed past this point.
    cvReleaseImage(&imgSobelH);
    cvReleaseImage(&imgSobelV);

    indices = acc.FindBest();
    if (indices.size() > 0) {
        cvEllipse(image,
                  cvPoint(indices[0] + eyeROI.x, indices[1] + eyeROI.y),
                  cvSize(indices[3], indices[4]),
                  -indices[2], // 90,
                  0, 360, CV_RGB(255, 0, 0));
        m_irisCentre.x = indices[0] + eyeROI.x;
        m_irisCentre.y = indices[1] + eyeROI.y;
        return true;
    }
    return false;
}
int main(int argc, char **argv) {
    int first = 1;
    int angle = 0;
    double duration = 5;
    IplImage *image = 0;
    IplImage *image2 = 0;
    IplImage *prev = 0;
    IplImage *output = 0;
    IplImage *depth = 0;
    IplImage *diff = 0;
    IplImage *bw = 0;
    IplImage *edge = 0;
    IplImage *edge2 = 0;

    // if (!prev) prev = cvCreateImageHeader(cvSize(640,480), 8, 3);
    //if (!diff) diff = cvCreateImageHeader(cvSize(640,480), 8, 3);
    diff = cvCreateImage(cvSize(640,480), 8, 3);
    bw = cvCreateImage(cvSize(640,480), 8, 1);
    edge = cvCreateImage(cvSize(640,480), 8, 1);
    edge2 = cvCreateImage(cvSize(640,480), 8, 1);
    output = cvCreateImage(cvSize(640,480), 8, 3);
    cvZero(output);
    //cvCvtColor(output, output, CV_RGB2BGR);

    while (1) {
        switch (cvWaitKey(10)) {
        case 113: // 'q' quits
            exit(0);
        case 'w':
            angle++;
            if (angle > 30) angle = 30;
            set_tilt_cv(angle, 0);
            break;
        case 'x':
            angle--;
            if (angle < -30) angle = -30;
            set_tilt_cv(angle, 0);
            break;
        case 's':
            angle = 0;
            set_tilt_cv(angle, 0);
            break;
        case 'e':
            angle += 10;
            if (angle > 30) angle = 30;
            set_tilt_cv(angle, 0);
            break;
        case 'c':
            angle -= 10;
            if (angle < -30) angle = -30;
            set_tilt_cv(angle, 0);
            break;
        default:
            // cvWaitKey(700);
            if (first) {
                prev = freenect_sync_get_rgb_cv(0);
                //first = 0;
            } else {
                prev = cvCloneImage(image2);
                cvReleaseImage(&image2);
            }
            image = freenect_sync_get_rgb_cv(0);
            if (!image) {
                printf("Error: Kinect not connected?\n");
                return -1;
            }
            image2 = cvCloneImage(image);
            cvCvtColor(image, image, CV_RGB2BGR);
            // cvCvtColor(image2, image2, CV_RGB2BGR);

            // Frame difference -> grayscale -> edges of the moving parts
            cvAbsDiff(image, prev, diff);
            cvCvtColor(diff, bw, CV_BGR2GRAY);
            cvCanny(bw, edge, 29000, 30500, 7);
            // cvThreshold(bw,bw,100,254,CV_THRESH_BINARY);
            cvNot(edge, edge2);

            if (!first) {
                cvSubS(output, cvScalar(255,255,255,255), output, 0);
                cvAnd(GlViewColor(depth), GlViewColor(depth), output, edge);
                //cvRunningAvg(GlViewColor(depth),output,1,edge);
            }
            //cvRunningAvg(image,output,1,edge);
            if (!first) {
                cvReleaseImage(&prev);
            } else
                first = 0;

            cvAddWeighted(image, .3, output, .7, 1, image);
            // OverlayImage(image2, output, cvPoint(0, 0), cvScalar(0.8,0.8,0.8,0.8), cvScalar(0.2,0.2,0.2,0.2));
            /*
            CvPoint* points[1];
            CvPoint ptt[5];
            points[0] = &(ptt[0]);
            points[0][0] = cvPoint(100,100);
            points[0][1] = cvPoint(200,100);
            points[0][2] = cvPoint(150,150);
            points[0][3] = cvPoint(150,300);
            points[0][4] = cvPoint(100,250);
            int npts[1];
            npts[0] = 5;
            cvPolyLine(image, points, npts, 1, 1, cvScalar(100,100,100,230), 1, 8, 0);
            cvFillPoly(image, points, npts, 1, cvScalar(100,100,100,230), 8, 0);
            */
            depth = freenect_sync_get_depth_cv(0);
            if (!depth) { // check before using the frame
                printf("Error: Kinect not connected?\n");
                return -1;
            }
            cvSmooth(depth, depth, CV_BLUR, 18, 18, 2.0, 2.0);
            cvShowImage("RGB", image);
            cvShowImage("Output", output);
            cvShowImage("Depth", GlViewColor(depth));
            break;
        }
    }
}
int start_video(char *peer, char *port, vid_options_t *vopt) {
    width = GET_WIDTH(vopt->width);
    height = GET_HEIGHT(vopt->height);
    render_type = vopt->render_type;
    disp_bandwidth = vopt->disp_bandwidth;

    display_options_t dopt;
    memset(&dopt, 0, sizeof(display_options_t));
    dopt.intensity_threshold = vopt->intensity_threshold;
    dopt.saturation = vopt->saturation;
    dopt.monochrome = vopt->monochrome;
    dopt.r = vopt->r;
    dopt.g = vopt->g;
    dopt.b = vopt->b;
    dopt.ascii_values = vopt->ascii_values;
    init_screen(&dopt);
    curs_set(0);

    pthread_mutex_init(&conslock, NULL);
    cons = calloc(1, sizeof(connection_t));
    if (p2p_connect(peer, port, &(cons[0]))) {
        fprintf(stderr, "Unable to connect to server.\n");
    } else {
        conslen++;
    }

    pthread_t thr;
    pthread_create(&thr, NULL, &dolisten, (void *)port);

    IplImage* color_img;
    IplImage* resize_img = cvCreateImage(cvSize(width, height), 8, 3);
    IplImage* edge = cvCreateImage(cvGetSize(resize_img), IPL_DEPTH_8U, 1);
    cv_cap = cvCaptureFromCAM(0);
    char line_buffer[sizeof(unsigned long) + width * depth];
    struct timespec tim, actual_tim;
    tim.tv_sec = 0;
    tim.tv_nsec = (1000000000 - 1) / vopt->refresh_rate;
    int kernel = 7;

    while (1) {
        /* Get each frame */
        color_img = cvQueryFrame(cv_cap);
        if (color_img && resize_img) {
            cvResize(color_img, resize_img, CV_INTER_AREA);
            if (vopt->edge_filter) {
                cvCvtColor(resize_img, edge, CV_BGR2GRAY);
                cvCanny(edge, edge, vopt->edge_lower * kernel * kernel,
                        vopt->edge_upper * kernel * kernel, kernel);
                cvCvtColor(edge, resize_img, CV_GRAY2BGR);
            }
            /* Send the frame line by line, prefixing each line with its index */
            unsigned long line_index;
            for (line_index = 0; line_index < (resize_img->imageSize / (width * depth)); line_index++) {
                memset(line_buffer, 0, sizeof(line_buffer));
                unsigned long send_index = htonl(line_index);
                memcpy(line_buffer, &send_index, sizeof(unsigned long));
                memcpy(&(line_buffer[sizeof(unsigned long)]),
                       resize_img->imageData + (line_index * width * depth),
                       width * depth);
                p2p_broadcast(&cons, &conslen, &conslock, line_buffer, sizeof(line_buffer));
            }
            nanosleep(&tim, &actual_tim);
        }
    }

    /* Housekeeping */
    cvReleaseCapture(&cv_cap);
    end_screen();
    return 0;
}
int main(int argc, char* argv[]) {
    IplImage* src = 0;
    IplImage* dst = 0;
    IplImage* color_dst = 0;

    char* filename = argc >= 2 ? argv[1] : "Image0.jpg";

    // Load as grayscale
    src = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE);
    if (!src) {
        printf("[!] Error: can't load image: %s \n", filename);
        return -1;
    }
    printf("[i] image: %s\n", filename);

    // Storage for lines
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* lines = 0;
    int i, p, i1 = 0, i2 = 0, x, y;
    double angle1, angle2, phi1, phi2, tetta, b1, b2, k1, k2;

    // phi - starting (worst) result, angle - the angle we are looking for
    phi1 = 0;
    phi2 = CV_PI / 2;
    angle1 = CV_PI / 2;
    angle2 = 0;

    dst = cvCreateImage(cvGetSize(src), 8, 1);
    color_dst = cvCreateImage(cvGetSize(src), 8, 3);

    // Canny edge detector
    cvCanny(src, dst, 50, 70, 3);
    // Convert into a color picture
    cvCvtColor(dst, color_dst, CV_GRAY2BGR);
    // Find lines
    lines = cvHoughLines2(dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI / 180, 100, 50, 10);

    // Find the needed lines by the angle between the 0x axis and the line.
    // To locate the point it is necessary to find 2 out of the 3 lines.
    for (i = 0; i < lines->total; i++) {
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines, i);
        // Order the endpoints so the tangent is computed consistently
        if (line[0].x >= line[1].x) {
            p = line[0].x; line[0].x = line[1].x; line[1].x = p;
            p = line[0].y; line[0].y = line[1].y; line[1].y = p;
        }
        tetta = atan((double)(line[1].y - line[0].y) / (double)(line[1].x - line[0].x));
        // First line
        if (fabs(angle1 - fabs(tetta)) < fabs(angle1 - fabs(phi1))) {
            phi1 = tetta;
            i1 = i;
        }
        // Second line
        if (fabs(angle2 - fabs(tetta)) < fabs(angle2 - fabs(phi2))) {
            // fabs(phi2 - tetta) <= threshold between the old line and the new one
            if ((fabs(phi2 - tetta) <= 0.25) && (i != 0)) {
                CvPoint* lastline = (CvPoint*)cvGetSeqElem(lines, i2);
                // Among lines with a similar angle, prefer the lower one
                if (lastline[0].y < line[0].y) {
                    i2 = i;
                    phi2 = tetta;
                }
            } else {
                i2 = i;
                phi2 = tetta;
            }
        }
    }

    CvPoint* line1 = (CvPoint*)cvGetSeqElem(lines, i1);
    CvPoint* line2 = (CvPoint*)cvGetSeqElem(lines, i2);

    // Draw the lines
    cvLine(color_dst, line1[0], line1[1], CV_RGB(255, 0, 0), 1, CV_AA, 0);
    cvLine(color_dst, line2[0], line2[1], CV_RGB(255, 0, 0), 1, CV_AA, 0);

    // Find and draw the intersection point: y = k*x + b for both lines
    k1 = tan(phi1);
    k2 = tan(phi2);
    b1 = line1[0].y - k1 * line1[0].x;
    b2 = line2[0].y - k2 * line2[0].x;
    x = (b2 - b1) / (k1 - k2);
    y = k1 * x + b1;
    // A degenerate two-point "line" drawn with thickness 5 acts as a dot.
    // (Stack array instead of the original single `new CvPoint()`, whose
    // point[1] access was out of bounds and leaked.)
    CvPoint point[2];
    point[0].x = x; point[0].y = y;
    point[1].x = x; point[1].y = y;
    cvLine(color_dst, point[0], point[1], CV_RGB(0, 255, 0), 5, CV_AA, 0);

    // Show the results
    cvNamedWindow("Source", 1);
    cvShowImage("Source", src);
    cvNamedWindow("Point", 1);
    cvShowImage("Point", color_dst);

    // Wait for a key
    cvWaitKey(0);
    cvSaveImage("test.jpg", color_dst);

    // Release resources
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    cvReleaseImage(&color_dst);
    cvDestroyAllWindows();
    return 0;
}
// initialize the main function
int main(int argc, char *argv[]) {
    if (argc < 2) {
        printf("Usage: %s <img.jpg>\n", argv[0]);
        return 1;
    }
    IplImage* picture = cvLoadImage(argv[1]);
    IplImage* greyImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    IplImage* cannyImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    IplImage* drawnImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 3);
    IplImage* contrastImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);

    cvNamedWindow("Image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Canny", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Threshold", CV_WINDOW_NORMAL);

    cvCvtColor(picture, greyImg, CV_BGR2GRAY);
    cvEqualizeHist(greyImg, greyImg);
    CvMemStorage* storage = cvCreateMemStorage(0);

    while (1) {
        // Create trackbars (re-created each pass; HighGUI ignores duplicates)
        cvCopy(picture, drawnImg); // picture to be displayed
        cvCreateTrackbar("min_dist", "Image", &min_dist_switch_value, 49, switch_min_dist);
        cvCreateTrackbar("dp", "Image", &dp_switch_value, 9, switch_dp);
        cvCreateTrackbar("High", "Canny", &high_switch_value, 499, switch_callback_h);
        cvCreateTrackbar("Low", "Canny", &low_switch_value, 499, switch_callback_l);
        cvCreateTrackbar("Threshold", "Threshold", &threshold_switch_value, 199, switch_threshold);
        cvCreateTrackbar("Max", "Threshold", &threshold_max_switch_value, 500, switch_threshold_max);

        int N = 7;
        double dp = dpInt + 1;
        double min_dist = min_distInt + 1;
        double lowThresh = lowInt + 1;
        double highThresh = highInt + 1;
        double threshold = thresholdInt + 1;
        double threshold_max = threshold_maxInt + 1;

        cvThreshold(greyImg, contrastImg, threshold, threshold_max, CV_THRESH_TOZERO_INV);
        cvCanny(contrastImg, cannyImg, lowThresh*N*N, highThresh*N*N, N);

        // CvSeq* circles = cvHoughCircles(greyImg, storage, CV_HOUGH_GRADIENT, 35, 25);
        CvSeq* circles = cvHoughCircles(cannyImg, storage, CV_HOUGH_GRADIENT, dp, min_dist);
        // dp is the accumulator resolution,
        // min_dist is the minimum distance between circle centres

        for (int i = 0; i < (circles ? circles->total : 0); i++) {
            float* p = (float*)cvGetSeqElem(circles, i);
            cvCircle(drawnImg, cvPoint(cvRound(p[0]), cvRound(p[1])), 3,
                     CV_RGB(0,255,0), -1, 8, 0);
        }

        cvShowImage("Image", drawnImg);
        cvShowImage("Canny", cannyImg);
        cvShowImage("Threshold", contrastImg);

        // Wait for 'b' (ASCII 98) before recomputing with the new settings
        char b = 0;
        while (b != 98) {
            b = cvWaitKey(1);
        }
    }
}
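// The call above leaves cvHoughCircles' last four arguments at their library
// defaults (param1 = 100, param2 = 100, min/max radius = 0). A sketch of the
// equivalent explicit call, shown with those defaults rather than tuned values:
CvSeq* circles = cvHoughCircles(cannyImg, storage, CV_HOUGH_GRADIENT,
                                dp, min_dist,
                                100,    // param1: internal Canny high threshold
                                100,    // param2: accumulator (vote) threshold
                                0, 0);  // min_radius, max_radius (unconstrained)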
CV_IMPL CvSeq* cvSegmentImage(const CvArr* srcarr, CvArr* dstarr,
                              double canny_threshold,
                              double ffill_threshold,
                              CvMemStorage* storage) {
    CvSeq* root = 0;
    CvMat* gray = 0;
    CvMat* canny = 0;
    //CvMat* temp = 0;
    void* stack = 0;

    CV_FUNCNAME("cvSegmentImage");

    __BEGIN__;

    CvMat srcstub, *src;
    CvMat dststub, *dst;
    CvMat* mask;
    CvSize size;
    CvPoint pt;
    int ffill_lw_up = cvRound(fabs(ffill_threshold));
    CvSeq* prev_seq = 0;

    CV_CALL(src = cvGetMat(srcarr, &srcstub));
    CV_CALL(dst = cvGetMat(dstarr, &dststub));

    size = cvGetSize(src);

    CV_CALL(gray = cvCreateMat(size.height, size.width, CV_8UC1));
    CV_CALL(canny = cvCreateMat(size.height, size.width, CV_8UC1));
    //CV_CALL( temp = cvCreateMat( size.height/2, size.width/2, CV_8UC3 ));

    CV_CALL(stack = cvAlloc(size.width * size.height * sizeof(Seg)));

    cvCvtColor(src, gray, CV_BGR2GRAY);
    cvCanny(gray, canny, 0/*canny_threshold*0.4*/, canny_threshold, 3);
    cvThreshold(canny, canny, 1, 1, CV_THRESH_BINARY);
    //cvZero( canny );
    //color_derv( src, canny, canny_threshold );

    //cvPyrDown( src, temp );
    //cvPyrUp( temp, dst );
    //src = dst;

    mask = canny; // a new name for a new role

    // make a non-zero border.
    cvRectangle(mask, cvPoint(0, 0), cvPoint(size.width - 1, size.height - 1),
                cvScalarAll(1), 1);

    for (pt.y = 0; pt.y < size.height; pt.y++) {
        for (pt.x = 0; pt.x < size.width; pt.x++) {
            if (mask->data.ptr[mask->step * pt.y + pt.x] == 0) {
                CvConnectedComp region;
                int avgVal[3] = { 0, 0, 0 };

                icvSegmFloodFill_Stage1(src->data.ptr, src->step,
                                        mask->data.ptr, mask->step,
                                        size, pt, avgVal,
                                        ffill_lw_up, ffill_lw_up,
                                        &region, stack);

                /*avgVal[0] = (avgVal[0] + 15) & -32;
                if( avgVal[0] > 255 ) avgVal[0] = 255;
                avgVal[1] = (avgVal[1] + 15) & -32;
                if( avgVal[1] > 255 ) avgVal[1] = 255;
                avgVal[2] = (avgVal[2] + 15) & -32;
                if( avgVal[2] > 255 ) avgVal[2] = 255;*/

                if (storage) {
                    CvSeq* tmpseq = icvGetComponent(mask->data.ptr, mask->step,
                                                    region.rect, storage);
                    if (tmpseq != 0) {
                        ((CvContour*)tmpseq)->color = avgVal[0] + (avgVal[1] << 8) + (avgVal[2] << 16);
                        tmpseq->h_prev = prev_seq;
                        if (prev_seq) {
                            prev_seq->h_next = tmpseq;
                        } else {
                            root = tmpseq;
                        }
                        prev_seq = tmpseq;
                    }
                }

                icvSegmFloodFill_Stage2(dst->data.ptr, dst->step,
                                        mask->data.ptr, mask->step,
                                        size, avgVal, region.rect);
            }
        }
    }

    __END__;

    //cvReleaseMat( &temp );
    cvReleaseMat(&gray);
    cvReleaseMat(&canny);
    cvFree(&stack);

    return root;
}
void TextDetector::detect(IplImage * input,
                          const struct TextDetectionParams &params,
                          std::vector<Chain> &chains,
                          std::vector<std::pair<Point2d, Point2d> > &compBB,
                          std::vector<std::pair<CvPoint, CvPoint> > &chainBB) {
    assert(input->depth == IPL_DEPTH_8U);
    assert(input->nChannels == 3);

    // Convert to grayscale
    IplImage * grayImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
    cvCvtColor(input, grayImage, CV_RGB2GRAY);

    // Create Canny image
    double threshold_low = 175;
    double threshold_high = 320;
    IplImage * edgeImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
    cvCanny(grayImage, edgeImage, threshold_low, threshold_high, 3);
    cvSaveImage("canny.png", edgeImage);

    // Create gradient X, gradient Y
    IplImage * gaussianImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
    cvConvertScale(grayImage, gaussianImage, 1. / 255., 0);
    cvSmooth(gaussianImage, gaussianImage, CV_GAUSSIAN, 5, 5);
    IplImage * gradientX = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
    IplImage * gradientY = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
    cvSobel(gaussianImage, gradientX, 1, 0, CV_SCHARR);
    cvSobel(gaussianImage, gradientY, 0, 1, CV_SCHARR);
    cvSmooth(gradientX, gradientX, 3, 3); // smoothtype 3 == CV_MEDIAN
    cvSmooth(gradientY, gradientY, 3, 3);
    cvReleaseImage(&gaussianImage);

    // Calculate SWT and return ray vectors
    std::vector<Ray> rays;
    IplImage * SWTImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
    for (int row = 0; row < input->height; row++) {
        float* ptr = (float*)(SWTImage->imageData + row * SWTImage->widthStep);
        for (int col = 0; col < input->width; col++) {
            *ptr++ = -1;
        }
    }
    strokeWidthTransform(edgeImage, gradientX, gradientY, params, SWTImage, rays);
    cvSaveImage("SWT_0.png", SWTImage);
    SWTMedianFilter(SWTImage, rays);
    cvSaveImage("SWT_1.png", SWTImage);

    IplImage * output2 = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
    normalizeImage(SWTImage, output2);
    cvSaveImage("SWT_2.png", output2);
    IplImage * saveSWT = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
    cvConvertScale(output2, saveSWT, 255, 0);
    cvSaveImage("SWT.png", saveSWT);
    cvReleaseImage(&output2);
    cvReleaseImage(&saveSWT);

    // Calculate legally connected components from SWT and gradient image.
    // Return type is a vector of vectors, where each outer vector is a component and
    // the inner vector contains the (y,x) of each pixel in that component.
    std::vector<std::vector<Point2d> > components =
        findLegallyConnectedComponents(SWTImage, rays);

    // Filter the components
    std::vector<std::vector<Point2d> > validComponents;
    std::vector<Point2dFloat> compCenters;
    std::vector<float> compMedians;
    std::vector<Point2d> compDimensions;
    filterComponents(SWTImage, components, validComponents, compCenters,
                     compMedians, compDimensions, compBB, params);
    IplImage * output3 = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 3);
    renderComponentsWithBoxes(SWTImage, validComponents, compBB, output3);
    cvSaveImage("components.png", output3);
    cvReleaseImage(&output3);

    // Make chains of components
    chains = makeChains(input, validComponents, compCenters, compMedians,
                        compDimensions, params);
    IplImage * output = cvCreateImage(cvGetSize(grayImage), IPL_DEPTH_8U, 3);
    renderChainsWithBoxes(SWTImage, validComponents, chains, compBB, chainBB, output);
    cvSaveImage("text-boxes.png", output);
    std::cout << "-------- detect end --------" << std::endl;

    cvReleaseImage(&output);
    cvReleaseImage(&gradientX);
    cvReleaseImage(&gradientY);
    cvReleaseImage(&SWTImage);
    cvReleaseImage(&edgeImage);
    cvReleaseImage(&grayImage);
    return;
}
t_jit_err cv_jit_lines_matrix_calc(t_cv_jit_lines *x, void *inputs, void *outputs) {
    t_jit_err err = JIT_ERR_NONE;
    long in_savelock, out_savelock;
    t_jit_matrix_info in_minfo, out_minfo;
    char *in_bp, *out_bp;
    long i, dimcount, dim[JIT_MATRIX_MAX_DIMCOUNT];
    void *in_matrix, *out_matrix;
    t_int32 *out;
    double thresh1, thresh2, theta, rho;
    int houghThresh;
    CvMat source;
    CvPoint *ln;

    in_matrix = jit_object_method(inputs, _jit_sym_getindex, 0);
    out_matrix = jit_object_method(outputs, _jit_sym_getindex, 0);

    if (x && in_matrix && out_matrix) {
        in_savelock = (long)jit_object_method(in_matrix, _jit_sym_lock, 1);
        out_savelock = (long)jit_object_method(out_matrix, _jit_sym_lock, 1);

        jit_object_method(in_matrix, _jit_sym_getinfo, &in_minfo);
        jit_object_method(out_matrix, _jit_sym_getinfo, &out_minfo);
        jit_object_method(in_matrix, _jit_sym_getdata, &in_bp);
        if (!in_bp) { err = JIT_ERR_INVALID_INPUT; goto out; }

        // compatible types?
        if (in_minfo.type != _jit_sym_char) { err = JIT_ERR_MISMATCH_TYPE; goto out; }
        // compatible planes?
        if ((in_minfo.planecount != 1) || (out_minfo.planecount != 4)) { err = JIT_ERR_MISMATCH_PLANE; goto out; }

        // get dimensions/planecount
        dimcount = in_minfo.dimcount;
        for (i = 0; i < dimcount; i++) {
            dim[i] = MIN(in_minfo.dim[i], out_minfo.dim[i]);
        }

        // Convert the input matrix to an OpenCV matrix
        cvJitter2CvMat(in_matrix, &source);

        // Adjust the size of the edge matrix if need be
        if ((x->edges->rows != source.rows) || (x->edges->cols != source.cols)) {
            cvReleaseMat(&(x->edges));
            x->edges = cvCreateMat(source.rows, source.cols, CV_8UC1);
        }

        // Calculate parameter values for the Hough and Canny algorithms
        thresh1 = x->threshold - THRESHOLD_RANGE;
        thresh2 = x->threshold + THRESHOLD_RANGE;
        CLIP_ASSIGN(thresh1, 0, 255);
        CLIP_ASSIGN(thresh2, 0, 255);
        theta = CV_PI / (180 / (double)x->resolution);
        rho = (double)x->resolution;
        houghThresh = x->sensitivity;
        x->gap = MAX(0, x->gap);
        x->length = MAX(0, x->length);

        // Calculate edges using the Canny algorithm
        cvCanny(&source, x->edges, thresh1, thresh2, 3);
        // Find lines using the probabilistic Hough transform method
        x->lines = cvHoughLines2(x->edges, x->storage, CV_HOUGH_PROBABILISTIC,
                                 rho, theta, houghThresh, x->length, x->gap);

        // Transfer line information to the output matrix.
        // First adjust the matrix size.
        out_minfo.dim[0] = x->lines->total;
        jit_object_method(out_matrix, _jit_sym_setinfo, &out_minfo);
        jit_object_method(out_matrix, _jit_sym_getinfo, &out_minfo);
        jit_object_method(out_matrix, _jit_sym_getdata, &out_bp);
        if (!out_bp) { err = JIT_ERR_INVALID_OUTPUT; goto out; }

        // Copy: one cell per line, planes = (x1, y1, x2, y2)
        out = (t_int32 *)out_bp;
        for (i = 0; i < x->lines->total; i++) {
            ln = (CvPoint*)cvGetSeqElem(x->lines, i);
            out[0] = ln[0].x;
            out[1] = ln[0].y;
            out[2] = ln[1].x;
            out[3] = ln[1].y;
            out += 4;
        }
    } else {
        return JIT_ERR_INVALID_PTR;
    }

out:
    jit_object_method(out_matrix, gensym("lock"), out_savelock);
    jit_object_method(in_matrix, gensym("lock"), in_savelock);
    return err;
}
void CV_CannyTest::run_func() {
    cvCanny(test_array[INPUT][0], test_array[OUTPUT][0],
            threshold1, threshold2,
            aperture_size + (use_true_gradient ? CV_CANNY_L2_GRADIENT : 0));
}
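// The test above uses the OpenCV 1.x convention of packing the optional
// CV_CANNY_L2_GRADIENT flag into the aperture argument. A minimal sketch of a
// direct call using that convention; `gray` and `edges` are assumed 8-bit
// single-channel images of equal size, and the thresholds are illustrative:
cvCanny(gray, edges, 50, 150, 3 | CV_CANNY_L2_GRADIENT);
// The flag requests the more accurate L2 gradient magnitude
// sqrt(dx^2 + dy^2) in place of the default |dx| + |dy| approximation.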
void check_glue_bottle( IplImage* original_image, IplImage* result_image )
{
    // TO-DO: Inspect the image of the glue bottle passed. This routine should
    // check a number of rows as specified by FIRST_LABEL_ROW_TO_CHECK,
    // LAST_LABEL_ROW_TO_CHECK and ROW_STEP_FOR_LABEL_CHECK. If any of these
    // searches fail then "No Label" should be written on the result image.
    // Otherwise, if all left and right column values are roughly the same,
    // "Label Present" should be written on the result image. Otherwise
    // "Label crooked" should be written on the result image.
    // To implement this you may need to use smoothing (cv::GaussianBlur() perhaps)
    // and edge detection (cvCanny() perhaps). You might also need cvConvertImage()
    // which converts between different types of image.

    IplImage* grayscale_image = cvCreateImage( cvGetSize(original_image), 8, 1 );
    cvConvertImage( original_image, grayscale_image );
    IplImage* temp = cvCloneImage(grayscale_image);

    // Gaussian smooth, then find the edge pixels.
    cvSmooth(temp, grayscale_image, CV_GAUSSIAN, 7, 7, 0, 0);
    cvCanny(grayscale_image, grayscale_image, 20, 100);

    int temp_edge_left = 0;
    int temp_edge_right = 0;
    int label_flag = false;
    int i = 0;
    int row = 0;
    for (row = FIRST_LABEL_ROW_TO_CHECK, i = 0; row <= LAST_LABEL_ROW_TO_CHECK; i++, row += ROW_STEP_FOR_LABEL_CHECK)
    {
        if (!find_label_edges(grayscale_image, grayscale_image, row, temp_edge_left, temp_edge_right))
        {
            // No label found.
            label_flag = false;
            break;
        }
        else
        {
            // Label is in the image; store the pixel coordinates.
            label_edge_left[i] = temp_edge_left;
            label_edge_right[i] = temp_edge_right;
            label_flag = true;
        }
    }

    int width_step = grayscale_image->widthStep;
    int pixel_step = grayscale_image->widthStep / grayscale_image->width;
    int result_width_step = result_image->widthStep;
    int result_pixel_step = result_image->widthStep / result_image->width;
    cvZero(result_image);
    unsigned char white_pixel[4] = {255, 255, 255, 0};
    unsigned char yellow_pixel[4] = {255, 255, 0, 0};
    unsigned char red_pixel[4] = {255, 0, 0, 0};
    unsigned char blue_pixel[4] = {0, 0, 255, 0};

    // Convert the grayscale edge image to an RGB image.
    for (row = 0; row < grayscale_image->height; row++)
    {
        for (int col = 0; col < grayscale_image->width; col++)
        {
            unsigned char* curr_point = GETPIXELPTRMACRO( grayscale_image, col, row, width_step, pixel_step );
            if (curr_point[0] == 255)
            {
                PUTPIXELMACRO( result_image, col, row, white_pixel, result_width_step, result_pixel_step, 4 );
            }
        }
    }

    int edge_flag = 0;
    int temp_col = 0;
    // Detect the edge pixels of the bottle and the label respectively.
    for (row = FIRST_LABEL_ROW_TO_CHECK; row <= LAST_LABEL_ROW_TO_CHECK; row++)
    {
        for (int col = 0; col < result_image->width; col++)
        {
            unsigned char* curr_point = GETPIXELPTRMACRO( result_image, col, row, result_width_step, result_pixel_step );
            if (curr_point[0] == 255)
            {
                // An edge pixel.
                if (edge_flag == 0)
                {
                    // The first edge pixel from left to right: the bottle edge pixel.
                    PUTPIXELMACRO( result_image, col, row, red_pixel, result_width_step, result_pixel_step, 4 );
                    temp_col = col; // store the current col value
                    edge_flag++;
                }
                else if (edge_flag == 1)
                {
                    // The second edge pixel from left to right is the label edge pixel
                    // if the spacing between the two edge pixels is less than a specific
                    // value (in case another edge of the bottle is mistaken for the
                    // label edge in an image without a label).
                    if (abs(temp_col - col) > 100)
                    {
                        temp_col = 0;
                        edge_flag = 0;
                        break;
                    }
                    else
                    {
                        // It is a label pixel.
                        PUTPIXELMACRO( result_image, col, row, blue_pixel, result_width_step, result_pixel_step, 4 );
                        edge_flag = 0;
                        temp_col = 0;
                        break;
                    }
                }
            }
        }
    }

    // Reset the state and do the same thing from the opposite direction.
    temp_col = 0;
    edge_flag = 0;
    for (row = FIRST_LABEL_ROW_TO_CHECK; row <= LAST_LABEL_ROW_TO_CHECK; row++)
    {
        for (int col = result_image->width - 1; col > 0; col--)
        {
            unsigned char* curr_point = GETPIXELPTRMACRO( result_image, col, row, result_width_step, result_pixel_step );
            if (curr_point[0] == 255)
            {
                if (edge_flag == 0)
                {
                    PUTPIXELMACRO( result_image, col, row, red_pixel, result_width_step, result_pixel_step, 4 );
                    temp_col = col;
                    edge_flag++;
                }
                else if (edge_flag == 1)
                {
                    if (abs(temp_col - col) > 100)
                    {
                        temp_col = 0;
                        edge_flag = 0;
                        break;
                    }
                    else
                    {
                        PUTPIXELMACRO( result_image, col, row, blue_pixel, result_width_step, result_pixel_step, 4 );
                        edge_flag = 0;
                        temp_col = 0;
                        break;
                    }
                }
            }
        }
    }

    // Draw yellow pixels on the intersections.
    for (row = FIRST_LABEL_ROW_TO_CHECK, i = 0; row <= LAST_LABEL_ROW_TO_CHECK; i++, row += ROW_STEP_FOR_LABEL_CHECK)
    {
        PUTPIXELMACRO( result_image, label_edge_left[i], row, yellow_pixel, result_width_step, result_pixel_step, 4 );
        PUTPIXELMACRO( result_image, label_edge_right[i], row, yellow_pixel, result_width_step, result_pixel_step, 4 );
    }

    if (!label_flag)
    {
        write_text_on_image(result_image, 0, 0, "No Label");
    }
    else
    {
        // If the deviation of the edge pixels exceeds a specific value, the label is crooked.
        if (abs(label_edge_left[5] - label_edge_left[0]) > DEVIATION)
        {
            write_text_on_image(result_image, 0, 0, "Label crooked");
        }
        else
        {
            // Label position is appropriate.
            write_text_on_image(result_image, 0, 0, "Label Present");
        }
    }
}
int main(int argc, char** argv) {
    int i = 0;
    const char * tpl = "frames/frame_%d.png";
    char filename[50];

    // Create a window called "Camarita" with a default size.
    cvNamedWindow("Camarita", CV_WINDOW_AUTOSIZE);
    // Open the connection to the webcam.
    CvCapture* capture = cvCreateCameraCapture(0);
    // Variables to hold the frames grabbed from the webcam.
    IplImage* original;
    IplImage* output;

    // Set the capture width to 320 px.
    if (cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320)) {
        // Careful! This condition can be a problem if your webcam
        // does not support a width of 320 pixels.
        while (1) {
            // Put the captured frame into the original image.
            original = cvQueryFrame(capture);
            if (!original) break;
            output = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 3);

            // Convert to gray
            IplImage* gray = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
            cvCvtColor(original, gray, CV_BGR2GRAY);

            // cvSmooth(original, output, CV_BLUR, 7, 7, 0, 0);
            // cvSmooth(original, output, CV_GAUSSIAN, 27, 27, 15, 15);
            cvCanny(gray, gray, 30, 80, 3);

            // Show the frame in the "Camarita" window.
            cvShowImage("Camarita", gray);

            // if(!(i%60)) {
            //     sprintf(filename, tpl, i);
            //     cvSaveImage(filename, gray, 0);
            // }

            // Release the per-frame images so each iteration does not leak.
            cvReleaseImage(&gray);
            cvReleaseImage(&output);

            // Wait for ESC to leave the infinite loop.
            char c = cvWaitKey(10);
            if (c == 27) break;
            i++;
        }
    }

    // Free the memory used by the capture.
    cvReleaseCapture(&capture);
    // Destroy the "Camarita" window.
    cvDestroyWindow("Camarita");
    return 0;
}
IplImage *preImg(std::string caminho) {
    int quadratico = 200;
    CvSize tamanho = cvSize(quadratico, quadratico);
    IplImage *in = cvLoadImage(caminho.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
    IplImage *src = cvCreateImage(tamanho, in->depth, in->nChannels);
    IplImage *dst = cvCreateImage(tamanho, in->depth, in->nChannels);
    IplImage *fn = cvCreateImage(cvSize(mh, mw), in->depth, in->nChannels);

    cvResize(in, src);
    cvThreshold(src, src, 220, 255, CV_THRESH_BINARY);
    cvShowImage("tresh", src);
    cvCanny(src, src, 100, 120, 3);
    //cvShowImage("canny", src);
    cvMorphologyEx(src, src, 0,
                   cvCreateStructuringElementEx(4, 4, 0, 0, CV_SHAPE_RECT),
                   cv::MORPH_DILATE, 1);
    //cvShowImage("Dilatacao", src);

    // Collect the coordinates of all edge pixels. Note the axis inversion:
    // cvGet2D takes (row, col), and the point is stored as (y, x); this only
    // works here because the image is square (200x200).
    std::vector<CvPoint> pontos;
    for (int y = 0; y < src->height; y++) {
        for (int x = 0; x < src->width; x++) {
            if (cvGet2D(src, x, y).val[0] == 255) { // axes inverted
                pontos.push_back(cvPoint(y, x));
            }
        }
    }
    std::sort(pontos.begin(), pontos.end(), sortPontos);
    CvPoint interpol = getInterpolado(pontos[0], pontos[pontos.size() - 1]);

    // CvScalar color = cvScalar(255, 255, 255);
    // int radius = 6;
    // int thickness = 2;
    // cvCircle(src, pontos[0], radius, color, thickness);
    // cvCircle(src, pontos[pontos.size() - 1], radius, color, thickness);
    // cvCircle(src, interpol, radius, color, thickness);
    // std::cout << cvGetReal2D(src, pontos.begin()->x, pontos.begin()->y) << std::endl;
    // cvShowImage("teste", src);
    //-----------------------------

    // Unwrap the image around the interpolated centre point.
    cvLogPolar(src, dst, cvPoint2D32f(interpol.x, interpol.y), 40,
               CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);
    //cvNamedWindow("log-polar", 1);
    //cvShowImage("log-polar", dst);
    //cvShowImage("LogPolar", dst);

    cvResize(dst, fn);
    //cvShowImage("teste saida", fn);
    return fn;
}
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description : This is the entry point of the program.
// Return value : SUCCESS:0 ERROR:-1
// --------------------------------------------------------------------------
int main(int argc, char **argv)
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        printf("Failed to initialize.\n");
        return -1;
    }

    // Get an image
    IplImage* image = ardrone.getImage();

    // Images
    IplImage *gray = cvCreateImage(cvGetSize(image), image->depth, 1);
    IplImage *smooth = cvCreateImage(cvGetSize(image), image->depth, 1);
    IplImage *canny = cvCreateImage(cvGetSize(image), image->depth, 1);

    // Canny thresholds
    int th1 = 50, th2 = 100;
    cvNamedWindow("canny");
    cvCreateTrackbar("th1", "canny", &th1, 255);
    cvCreateTrackbar("th2", "canny", &th2, 255);

    // Main loop
    while (1) {
        // Key input
        int key = cvWaitKey(1);
        if (key == 0x1b) break;

        // Update
        if (!ardrone.update()) break;

        // Get an image
        image = ardrone.getImage();

        // Convert to gray scale
        cvCvtColor(image, gray, CV_BGR2GRAY);

        // De-noising
        cvSmooth(gray, smooth, CV_GAUSSIAN, 23, 23);

        // Detect edges
        cvCanny(smooth, canny, th1, th2, 3);

        // Detect circles
        CvMemStorage *storage = cvCreateMemStorage(0);
        CvSeq *circles = cvHoughCircles(smooth, storage, CV_HOUGH_GRADIENT,
                                        1.0, 10.0, MAX(th1, th2), 20);

        // Draw circles
        for (int i = 0; i < circles->total; i++) {
            float *p = (float*)cvGetSeqElem(circles, i);
            cvCircle(image, cvPoint(cvRound(p[0]), cvRound(p[1])),
                     cvRound(p[2]), CV_RGB(0,255,0), 3, 8, 0);
        }

        // Release memory
        cvReleaseMemStorage(&storage);

        // Change camera
        static int mode = 0;
        if (key == 'c') ardrone.setCamera(++mode % 4);

        // Display the image
        cvShowImage("camera", image);
        cvShowImage("canny", canny);
    }

    // Release memories
    cvReleaseImage(&gray);
    cvReleaseImage(&smooth);
    cvReleaseImage(&canny);

    // See you
    ardrone.close();
    return 0;
}
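// One detail worth noting in the loop above: cvHoughCircles with
// CV_HOUGH_GRADIENT runs Canny internally on its input, using param1 (here
// MAX(th1, th2)) as the high threshold and half of it as the low one, so the
// explicit "canny" image is only used for display. A rough sketch of the
// equivalent internal edge-map step (illustrative only, not code from the sample):
double hi = MAX(th1, th2);              // param1 from the call above
cvCanny(smooth, canny, hi / 2, hi, 3);  // approximation of the internal step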
void extractSegments(IplImage *imagen, tpRectas *rectas, int longitud_minima) {
    int i, j, modulo, d = 0, k, n, total, etiqueta = 1, columna, fila, maxX, maxY, minX, minY;
    int **direction;
    int dir1 = 0, dir2 = 0, dir3 = 0, dir4 = 0, dir5 = 0, dir6 = 0, dir7 = 0, dir8 = 0;
    double angulo, theta, conf, r;
    int edge1[TAMEDGE];
    float zmx, zmy, auxX, auxY, zmxy;
    tpRecta nuevaRecta;
    CvPoint punto;
    IplImage *contornos, *im_gaus, *dx, *dy, *etiquetas, *dir_im;
    CvSize tamano;

    //indices=(int *)malloc(sizeof(int)*50000);
    direction = (int **)malloc(9 * sizeof(int*));
    for (i = 0; i < 9; i++) {
        direction[i] = (int *)malloc(sizeof(int) * MAX_INDICE);
    }

    // Blur the image with a Gaussian filter.
    im_gaus = cvCreateImage(cvGetSize(imagen), IPL_DEPTH_8U, 1);
    cvZero(im_gaus);
    cvSmooth(imagen, im_gaus, CV_GAUSSIAN, 7, 7, 1.5, 0);

    tamano = cvGetSize(imagen);
    contornos = cvCreateImage(tamano, IPL_DEPTH_8U, 1);
    cvZero(contornos);
    // Apply the Canny mask.
    cvCanny(im_gaus, contornos, 75, 300, 5);

    // Since the mask size is 5, clear 2 rows and 2 columns: the garbage zone.
    for (j = 0; j < tamano.width; j++) {
        for (i = 0; i < 2; i++) {
            CV_IMAGE_ELEM(contornos, uchar, i, j) = 0;
            CV_IMAGE_ELEM(contornos, uchar, (tamano.height - 1 - i), j) = 0;
        }
    }
    for (i = 0; i < tamano.height; i++) {
        for (j = 0; j < 2; j++) {
            CV_IMAGE_ELEM(contornos, uchar, i, j) = 0;
            CV_IMAGE_ELEM(contornos, uchar, i, tamano.width - 1 - j) = 0;
        }
    }

    // Variables for computing the gradient and orientation.
    dx = cvCreateImage(tamano, IPL_DEPTH_32F, 1);
    dy = cvCreateImage(tamano, IPL_DEPTH_32F, 1);
    cvZero(dx);
    cvZero(dy);
    calculoGradientes(im_gaus, tamano.height, tamano.width, dx, dy);
    cvReleaseImage(&im_gaus);

    // Scan row by row.
    for (i = 0; i < tamano.height; i++) {
        for (j = 0; j < tamano.width; j++) {
            if (CV_IMAGE_ELEM(contornos, uchar, i, j) != 0) {
                // Store the contour index.
                //indices[numIndices]=INDICE(i,j,tamano.width);
                //numIndices++;
                if (CV_IMAGE_ELEM(dx, float, i, j) == 0) {
                    // Avoid dividing by zero when the horizontal gradient vanishes.
                    angulo = atan((double)CV_IMAGE_ELEM(dy, float, i, j) / 0.00001);
                } else {
                    // Compute the gradient orientation.
                    angulo = atan((double)CV_IMAGE_ELEM(dy, float, i, j) /
                                  (double)CV_IMAGE_ELEM(dx, float, i, j));
                }

                // Assign each gradient orientation to one of eight bins.
                if ((angulo >= (pi/16.0)) && (angulo < (3*pi/16.0)))          modulo = 1;
                else if ((angulo >= (3*pi/16.0)) && (angulo < (5*pi/16.0)))   modulo = 2;
                else if ((angulo >= (5*pi/16.0)) && (angulo < (7*pi/16.0)))   modulo = 3;
                else if ((angulo >= (7*pi/16.0)) || (angulo < -(7*pi/16.0)))  modulo = 4;
                else if ((angulo >= -(7*pi/16.0)) && (angulo < -(5*pi/16.0))) modulo = 5;
                else if ((angulo >= -(5*pi/16.0)) && (angulo < -(3*pi/16.0))) modulo = 6;
                else if ((angulo >= -(3*pi/16.0)) && (angulo < -(pi/16.0)))   modulo = 7;
                else if ((angulo >= -(pi/16.0)) && (angulo < (pi/16.0)))      modulo = 8;
                else modulo = 8;

                d = seleccionFilaDeConjunto(modulo, &dir1, &dir2, &dir3, &dir4,
                                            &dir5, &dir6, &dir7, &dir8);
                anyadirIndiceAconjunto(direction, (int)modulo, INDICE(i, j, tamano.width), d);
            }
            //else cvSet2D(final,y,x,CV_RGB(0,0,0));
        } // for j
    } // for i
int main(int argc, const char * argv[]) {
    IplImage* rawImage = NULL;     // Original image
    IplImage** buf = NULL;
    IplImage* blur = NULL;         // Blur
    IplImage* bw = NULL;           // Black & white
    IplImage* canny = NULL;        // Edge detection
    IplImage* silhouette = NULL;   // Silhouette to calc. Motion History Image
    IplImage* mhi = NULL;          // Motion History Image
    IplImage* mask = NULL;         // Motion History Gradient
    IplImage* orient = NULL;       // Motion History Gradient

    const double MHI_DURATION = 1;
    const double MAX_TIME_DELTA = 0.5;
    const double MIN_TIME_DELTA = 0.05;
    double timestamp = 0.1;
    int numeroFrames = 0;
    int keyPressed = 0;
    int ancho = 0, alto = 0, umbral1 = 0, umbral2 = 30, apertura = 3; // edge-detection params
    int currBufIndex = 0;
    CvSize size;

    // Capture source (camera/video)
    printf("Capture from camera\n");
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(0);
    // capture = cvCaptureFromFile( argv[1] );

    // Display windows
    cvNamedWindow("WithoutBorder", CV_WINDOW_AUTOSIZE);

    // Main loop
    while (TRUE) {
        // Grab a frame
        ++numeroFrames;
        rawImage = cvQueryFrame(capture);
        size = cvGetSize(rawImage);

        /*
         * To black and white
         */
        bw = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvConvertImage(rawImage, bw, 0);

        /*
         * Blur the image
         */
        blur = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvSmooth(bw, blur, CV_BLUR, 11, 11);

        /*
         * Edge detection
         * Thanks to http://redstar.linaresdigital.com/robotica/filtro_canny.c
         */
        canny = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvCanny(blur, canny, umbral1, umbral2, apertura);

        /*
         * MHI
         */
        if (!mhi) {
            cvReleaseImage(&mhi);
            mhi = cvCreateImage(size, IPL_DEPTH_32F, 1);
            cvZero(mhi);
        }
        if (!silhouette) {
            cvReleaseImage(&silhouette);
            silhouette = cvCreateImage(size, IPL_DEPTH_8U, 1);
            cvZero(silhouette);
        }
        if (buf == NULL) {
            buf = (IplImage**)malloc(2 * sizeof(buf[0]));
            memset(buf, 0, 2 * sizeof(buf[0]));
            for (int i = 0; i < 2; i++) {
                cvReleaseImage(&buf[i]);
                buf[i] = cvCreateImage(size, IPL_DEPTH_8U, 1);
                cvZero(buf[i]);
            }
        }
        cvCopy(canny, buf[currBufIndex]);
        currBufIndex = (currBufIndex + 1) % 2; // cycle through the buffer
        cvAbsDiff(canny, buf[currBufIndex], silhouette);

        timestamp = (double)clock() / CLOCKS_PER_SEC;
        cvUpdateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION);

        /*
         * MHG
         */
        if (!mask || !orient) {
            cvReleaseImage(&mask);
            mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
            cvZero(mask);
            cvReleaseImage(&orient);
            // cvCalcMotionGradient expects a 32-bit float orientation image.
            orient = cvCreateImage(size, IPL_DEPTH_32F, 1);
            cvZero(orient);
        }
        cvCalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);

        cvShowImage("WithoutBorder", mhi);

        /*
         * Display
         * Now we compute the area of the image where we want to paste the thumbnail:
         * ancho = rawImage->width / 4;
         * alto = rawImage->height / 4;
         * cvSetImageROI(canny, cvRect(rawImage->width - (ancho * 1.1), rawImage->height - (alto * 1.1), ancho, alto));
         * // Now copy the frame contents into the desired area of the copy
         * cvResize(bn, canny, CV_INTER_LINEAR);
         * // If we don't undo the region of interest we will only see the freshly copied area
         * cvResetImageROI(canny);
         * // Show the scaled image
         * cvShowImage("WithoutBorder", canny);
         */

        // Read the keyboard for possible exit signals; wait 10 ms
        keyPressed = cvWaitKey(10);
        if (keyPressed == 27 || keyPressed == 'q') {
            break;
        }
    }

    // Clean up. Frames returned by cvQueryFrame are owned by the capture,
    // so release the capture rather than rawImage itself.
    cvDestroyWindow("WithoutBorder");
    cvReleaseCapture(&capture);
    return 0;
}
int main(int argc, char* argv[]) {
    const char* name = "Edge Detection Window";

    // Kernel size
    int N = 7;

    // Set up original image
    IplImage* org_img = cvLoadImage(argv[1], 0);
    // Resize original image
    IplImage* img = cvCreateImage(cvSize(512, 512), org_img->depth, org_img->nChannels);
    cvResize(org_img, img);

    // Create final images
    IplImage* img_b = cvCreateImage(cvSize(img->width + N - 1, img->height + N - 1),
                                    img->depth, img->nChannels);
    IplImage* out = cvCreateImage(cvGetSize(img_b), IPL_DEPTH_8U, img_b->nChannels);

    // Add convolution borders
    CvPoint offset = cvPoint((N-1)/2, (N-1)/2);
    cvCopyMakeBorder(img, img_b, offset, IPL_BORDER_REPLICATE, cvScalarAll(0));

    // Make window
    cvNamedWindow(name, 1);

    // Edge detection variables
    int aperture_size = N;
    double lowThresh = 20;
    double highThresh = 40;

    // Create trackbars
    cvCreateTrackbar("High", name, &high_switch_value, 4, switch_callback_h);
    cvCreateTrackbar("Low", name, &low_switch_value, 4, switch_callback_l);

    while (1) {
        switch (highInt) {
            case 0: highThresh = 200; break;
            case 1: highThresh = 400; break;
            case 2: highThresh = 600; break;
            case 3: highThresh = 800; break;
            case 4: highThresh = 1000; break;
        }
        switch (lowInt) {
            case 0: lowThresh = 0; break;
            case 1: lowThresh = 100; break;
            case 2: lowThresh = 200; break;
            case 3: lowThresh = 400; break;
            case 4: lowThresh = 600; break;
        }

        // Edge detection: thresholds are scaled by N*N because larger Sobel
        // apertures produce proportionally larger gradient magnitudes.
        cvCanny(img_b, out, lowThresh*N*N, highThresh*N*N, aperture_size);
        cvShowImage(name, out);

        if (cvWaitKey(15) == 27) break;
    }

    // Release
    cvReleaseImage(&img);
    cvReleaseImage(&img_b);
    cvReleaseImage(&out);
    cvDestroyWindow(name);
    return 0;
}
/**
 Returns a CvSeq (an OpenCV sequence) of Tetris pieces detected in an image.
 Based on the OpenCV example of identifying a square, modified to detect
 L-shaped Tetris pieces. Effectiveness depends on the edge-detection
 thresholds and on the camera being positioned orthogonal to the Tetris piece.
*/
CvSeq* Camera::findTetris( IplImage* img, CvMemStorage* storage )
{
    thresh = 50;
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    /// Copy of the image so that the detection is non-destructive
    IplImage* timg = cvCloneImage( img );
    /// Gray scale needed
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    /// Smaller version to do scaling
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;

    // create an empty sequence that will contain points -
    /// 6 points per Tetris piece (the vertices)
    CvSeq* tetrisPieces = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum region of interest (ROI) in the image
    // with the width and height divisible by 2 -- the biggest
    // possible size of the object.
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    /// find pieces in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        /// extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        /// try several threshold levels
        for( l = 0; l < N; l++ )
        {
            /// hack: use Canny instead of zero threshold level.
            /// Canny helps to catch tetrisPieces with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from the slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 50, 120, 5 );
                // dilate the Canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                            CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                                       CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                /* Tetris pieces have 6 vertices. The approximation of a large
                 * area is used to filter out "noisy contours".
                 * Note: the absolute value of the area is used because
                 * the area may be positive or negative, in accordance with
                 * the contour orientation. */
                if( result->total == 6 &&
                    fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
                    fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 10000 )
                {
                    s = 0;
                    for( i = 0; i < 7; i++ )
                    {
                        // find the minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                                (CvPoint*)cvGetSeqElem( result, i ),
                                (CvPoint*)cvGetSeqElem( result, i-2 ),
                                (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }

                    // if the cosines of all angles are small
                    // (all angles are ~90 degrees) then write the piece's
                    // vertices to the resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 6; i++ )
                            cvSeqPush( tetrisPieces,
                                       (CvPoint*)cvGetSeqElem( result, i ));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

    return tetrisPieces;
}
// Returns a sequence of squares detected on the image.
// The sequence is stored in the specified memory storage.
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    IplImage* timg = cvCloneImage( img ); // make a copy of the input image
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;

    // create an empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum ROI in the image
    // with the width and height divisible by 2
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    // find squares in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        // extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        // try several threshold levels
        for( l = 0; l < N; l++ )
        {
            // hack: use Canny instead of the zero threshold level;
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny with the upper threshold from the slider and
                // the lower threshold set to 0 (which forces edge merging)
                cvCanny( tgray, gray, 0, thresh, 5 );
                // dilate the Canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply a plain threshold if l != 0:
                // gray(x,y) = tgray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                            CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate the contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                                       CV_POLY_APPROX_DP,
                                       cvContourPerimeter(contours)*0.02, 0 );
                // square contours should have 4 vertices after approximation,
                // a relatively large area (to filter out noisy contours),
                // and be convex.
                // Note: the absolute value of the area is used because the
                // area may be positive or negative, in accordance with the
                // contour orientation
                if( result->total == 4 &&
                    cvContourArea(result,CV_WHOLE_SEQ,0) > 500 &&
                    cvCheckContourConvexity(result) )
                {
                    s = 0;
                    for( i = 0; i < 5; i++ )
                    {
                        // find the minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                                (CvPoint*)cvGetSeqElem( result, i ),
                                (CvPoint*)cvGetSeqElem( result, i-2 ),
                                (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }

                    // if the cosines of all angles are small
                    // (all angles are ~90 degrees), write the quadrangle
                    // vertices to the resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 4; i++ )
                            cvSeqPush( squares,
                                       (CvPoint*)cvGetSeqElem( result, i ));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );

    return squares;
}
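Both square finders above (and the Tetris variant) call an angle() helper that none of the snippets define. For reference, the canonical definition from OpenCV's squares.c sample, which these functions appear to be based on; it returns the cosine of the angle at pt0 and assumes the usual cv.h/math.h includes:

// Cosine of the angle between vectors (pt1 - pt0) and (pt2 - pt0);
// the 1e-10 term guards against division by zero on degenerate contours.
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
    double dx1 = pt1->x - pt0->x;
    double dy1 = pt1->y - pt0->y;
    double dx2 = pt2->x - pt0->x;
    double dy2 = pt2->y - pt0->y;
    return (dx1*dx2 + dy1*dy2) /
           sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}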
void MouthContours::execute(IplImage* img, IplImage* drw, CvRect mouthSearch)
{
    CvSeq* contours;

    if (CV_IS_IMAGE(imgGrey)) {
        cvReleaseImage(&imgGrey);
    }
    if (CV_IS_IMAGE(imgTempl)) {
        cvReleaseImage(&imgTempl);
    }

    allocateOnDemand( &storageTeeth );
    allocateOnDemand( &imgTempl, cvSize( img->width, img->height ), IPL_DEPTH_8U, 3 );
    cvCopy( img, imgTempl, 0 );
    allocateOnDemand( &imgGrey, cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );

    if (CV_IS_STORAGE(storageTeeth)) {
        contours = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                sizeof(CvPoint), storageTeeth );
        cvCvtColor( imgTempl, imgGrey, CV_BGR2GRAY );

        // Gaussian kernel size must be odd, hence the |1
        int sigma = 1;
        int ksize = (sigma*5)|1;

        // Restrict processing to the mouth region
        cvSetImageROI(imgGrey, mouthSearch);
        cvSetImageROI(drw, mouthSearch);

        cvSmooth( imgGrey, imgGrey, CV_GAUSSIAN, ksize, ksize, sigma, sigma );
        //cvEqualizeHist( small_img_grey, small_img_grey );

        // Edge detection followed by a closing (dilate + erode)
        // to join broken edge segments
        cvCanny( imgGrey, imgGrey, 70, 70, 3 );
        cvDilate( imgGrey, imgGrey, NULL, 1 );
        cvErode( imgGrey, imgGrey, NULL, 1 );

        cvFindContours( imgGrey, storageTeeth, &contours, sizeof(CvContour),
                        CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

        if (CV_IS_SEQ(contours)) {
            contours = cvApproxPoly( contours, sizeof(CvContour), storageTeeth,
                                     CV_POLY_APPROX_DP, 5, 1 );
            if( contours->total > 0 ) {
                for ( ; contours; contours = contours->h_next ) {
                    if( contours->total < 4 )
                        continue;
                    cvDrawContours( drw, contours, CV_RGB(255,0,0), CV_RGB(0,255,0),
                                    5, 1, CV_AA, cvPoint(0,0) );
                    MouthContours::TeethArcLength = cvArcLength( contours, CV_WHOLE_SEQ, -1 );
                    MouthContours::TeethAreaContour = cvContourArea( contours, CV_WHOLE_SEQ );

                    // Timestamp of the last detection
                    time_t ltime;
                    struct tm *Tm;
                    ltime = time(NULL);
                    Tm = localtime(&ltime);
                    MouthContours::MouthHH = Tm->tm_hour;
                    MouthContours::MouthMM = Tm->tm_min;
                    MouthContours::MouthSS = Tm->tm_sec;
                }
            } else {
                MouthContours::MouthHH = 0;
                MouthContours::MouthMM = 0;
                MouthContours::MouthSS = 0;
                MouthContours::TeethArcLength = 0;
                MouthContours::TeethAreaContour = 0;
            }
        } else {
            MouthContours::MouthHH = 0;
            MouthContours::MouthMM = 0;
            MouthContours::MouthSS = 0;
            MouthContours::TeethArcLength = 0;
            MouthContours::TeethAreaContour = 0;
        }
        cvClearMemStorage( storageTeeth );
    }
    cvResetImageROI(imgGrey);
    cvResetImageROI(drw);
}
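allocateOnDemand() is called above for both images and the contour storage but is not defined in the snippet. A plausible minimal implementation matching the lazy-allocation pattern the calls imply; the overload set is an assumption:

// Allocate the image once, on first use; later calls are no-ops.
static void allocateOnDemand( IplImage** img, CvSize size, int depth, int channels )
{
    if ( *img != NULL ) return;
    *img = cvCreateImage( size, depth, channels );
}

// Assumed overload for the CvMemStorage used by cvFindContours above.
static void allocateOnDemand( CvMemStorage** storage )
{
    if ( *storage != NULL ) return;
    *storage = cvCreateMemStorage(0);
}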
CvSeq* CSquareDetection::FindSquares( IplImage* tgray )
{
    CvSeq* contours;
    int i, l, N = 11;
    double imgArea = tgray->width * tgray->height;
    CvSize sz = cvSize( tgray->width & -2, tgray->height & -2 );
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 1 );
    CvSeq* result;

    // create an empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum ROI in the image
    // with the width and height divisible by 2
    cvSetImageROI( tgray, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    //cvPyrDown( tgray, pyr, 7 );
    //cvPyrUp( pyr, tgray, 7 );

    // Canny pass; note that the first cvThreshold in the loop below
    // overwrites 'gray' before the dilated edges are ever used, so this
    // pair of calls is effectively dead code (apparently left over from
    // the original sample)
    cvCanny( tgray, gray, 0, _CannyThresh, 5 );
    cvDilate( gray, gray, 0, 1 );

    // try several threshold levels
    for( l = 1; l < N-4; l++ )
    {
        cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );

        // find contours and store them all as a list
        cvFindContours( gray, storage, &contours, sizeof(CvContour),
                        CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

        // test each contour
        while( contours )
        {
            // approximate the contour with accuracy proportional
            // to the contour perimeter
            result = cvApproxPoly( contours, sizeof(CvContour), storage,
                                   CV_POLY_APPROX_DP,
                                   cvContourPerimeter(contours)*0.02, 0 );
            // square contours should have 4 vertices after approximation,
            // an area within the configured proportion of the image area
            // (to filter out noisy contours), and be convex.
            // Note: the absolute value of the area is used because the area
            // may be positive or negative, in accordance with the contour
            // orientation
            double area = fabs(cvContourArea(result, CV_WHOLE_SEQ));
            if( result->total == 4
                && area < _maxPropotionArea * imgArea
                && area > _minPropotionArea * imgArea
                && cvCheckContourConvexity(result) )
            {
                // check the corner angles and re-order the vertex positions
                if (Check4Vertexes(result, _CosineThresh, _EdgeThresh))
                {
                    // push into the result sequence
                    for( i = 0; i < 4; i++ )
                        cvSeqPush( squares, (CvPoint*)cvGetSeqElem( result, i ));
                }
            }
            // take the next contour
            contours = contours->h_next;
        }
    }

    // Second pass: merge overlapping detections
    int delta_thres = 30; // (unused in this version)
    int* flags = new int[squares->total/4];
    for (int i = 0; i < squares->total/4; i++)
        flags[i] = 0;

    CvSeq* sqrSeq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );
    CvPoint* V[4], *Vp[4];

    for (int i = 0; i < squares->total; i += 4)
    {
        if (!flags[i/4])
        {
            V[0] = (CvPoint*)cvGetSeqElem( squares, i );
            V[1] = (CvPoint*)cvGetSeqElem( squares, i+1 );
            V[2] = (CvPoint*)cvGetSeqElem( squares, i+2 );
            V[3] = (CvPoint*)cvGetSeqElem( squares, i+3 );

            for (int j = i+4; j < squares->total; j += 4)
            {
                if (!flags[j/4])
                {
                    Vp[0] = (CvPoint*)cvGetSeqElem( squares, j );
                    Vp[1] = (CvPoint*)cvGetSeqElem( squares, j+1 );
                    Vp[2] = (CvPoint*)cvGetSeqElem( squares, j+2 );
                    Vp[3] = (CvPoint*)cvGetSeqElem( squares, j+3 );

                    // midpoint of the other square's diagonal
                    CvPoint M;
                    M.x = (Vp[0]->x + Vp[2]->x) / 2;
                    M.y = (Vp[0]->y + Vp[2]->y) / 2;

                    // if that midpoint lies inside this square, the two
                    // detections overlap: keep the smaller one
                    if (MathHelper.ktNamTrong(V, 4, &M))
                    {
                        int d1 = max(MathHelper.sqrDistance(V[0], V[1]),
                                     MathHelper.sqrDistance(V[1], V[2]));
                        int d2 = max(MathHelper.sqrDistance(Vp[0], Vp[1]),
                                     MathHelper.sqrDistance(Vp[1], Vp[2]));
                        if ( d1 > d2 )
                        {
                            V[0]->x = Vp[0]->x; V[0]->y = Vp[0]->y;
                            V[1]->x = Vp[1]->x; V[1]->y = Vp[1]->y;
                            V[2]->x = Vp[2]->x; V[2]->y = Vp[2]->y;
                            V[3]->x = Vp[3]->x; V[3]->y = Vp[3]->y;
                        }
                        flags[j/4] = 1;
                    }
                }
            }
        }
    }

    for (int i = 0; i < squares->total; i += 4)
    {
        if (!flags[i/4])
        {
            V[0] = (CvPoint*)cvGetSeqElem( squares, i );
            V[1] = (CvPoint*)cvGetSeqElem( squares, i+1 );
            V[2] = (CvPoint*)cvGetSeqElem( squares, i+2 );
            V[3] = (CvPoint*)cvGetSeqElem( squares, i+3 );

            // check whether the vertices run counter-clockwise;
            // if they do not, swap V[1] and V[3] to fix the
            // orientation of the card
            Line* l = MathHelper.ptDuongThang(V[0], V[1]);
            if (MathHelper.thePointLenLine(l, V[3]) > 0)
            {
                int temp = V[1]->x; V[1]->x = V[3]->x; V[3]->x = temp;
                temp = V[1]->y; V[1]->y = V[3]->y; V[3]->y = temp;
            }
            //MathHelper.SapDongHo(V);

            cvSeqPush(sqrSeq, V[0]);
            cvSeqPush(sqrSeq, V[1]);
            cvSeqPush(sqrSeq, V[2]);
            cvSeqPush(sqrSeq, V[3]);
        }
    }
    //cvClearSeq(squares);

    delete[] flags;

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    //cvReleaseImage( &tgray );

    // caution (as in the original): sqrSeq was allocated on 'storage', so
    // clearing the storage here leaves the returned sequence pointing at
    // recycled memory unless the caller copies it first
    cvClearMemStorage(storage);
    return sqrSeq;
}
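All of these detectors return a flat CvSeq holding four CvPoint vertices per square. A viewer sketch, patterned after the drawSquares() routine that accompanies findSquares4() in OpenCV's squares.c sample; the window-name parameter is an addition here, the sample uses a global:

// Draw each 4-vertex square in the sequence onto a copy of the image.
void drawSquares( IplImage* img, CvSeq* squares, const char* wndname )
{
    CvSeqReader reader;
    IplImage* cpy = cvCloneImage( img );
    int i;

    cvStartReadSeq( squares, &reader, 0 );
    for( i = 0; i < squares->total; i += 4 )
    {
        CvPoint pt[4], *rect = pt;
        int count = 4;

        // read the 4 vertices of the current square
        CV_READ_SEQ_ELEM( pt[0], reader );
        CV_READ_SEQ_ELEM( pt[1], reader );
        CV_READ_SEQ_ELEM( pt[2], reader );
        CV_READ_SEQ_ELEM( pt[3], reader );

        // draw the square as a closed green polyline
        cvPolyLine( cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0 );
    }
    cvShowImage( wndname, cpy );
    cvReleaseImage( &cpy );
}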
static GstFlowReturn
kms_crowd_detector_transform_frame_ip (GstVideoFilter * filter,
    GstVideoFrame * frame)
{
  KmsCrowdDetector *crowddetector = KMS_CROWD_DETECTOR (filter);
  GstMapInfo info;

  kms_crowd_detector_initialize_images (crowddetector, frame);

  if ((crowddetector->priv->num_rois == 0)
      && (crowddetector->priv->rois != NULL)) {
    kms_crowd_detector_extract_rois (crowddetector);
  }

  if (crowddetector->priv->pixels_rois_counted == TRUE
      && crowddetector->priv->actual_image != NULL) {
    kms_crowd_detector_count_num_pixels_rois (crowddetector);
    crowddetector->priv->pixels_rois_counted = FALSE;
  }

  gst_buffer_map (frame->buffer, &info, GST_MAP_READ);
  crowddetector->priv->actual_image->imageData = (char *) info.data;

  /* Scratch images, all sized to the current frame */
  IplImage *frame_actual_gray =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (frame_actual_gray);

  IplImage *actual_lbp =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (actual_lbp);

  IplImage *lbp_temporal_result =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (lbp_temporal_result);

  IplImage *add_lbps_result =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (add_lbps_result);

  IplImage *lbps_alpha_result_rgb =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 3);
  cvSet (lbps_alpha_result_rgb, CV_RGB (0, 0, 0), 0);

  IplImage *actual_image_masked =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (actual_image_masked);

  IplImage *substract_background_to_actual =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (substract_background_to_actual);

  IplImage *low_speed_map =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (low_speed_map);

  IplImage *high_speed_map =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (high_speed_map);

  IplImage *actual_motion =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 3);
  cvSet (actual_motion, CV_RGB (0, 0, 0), 0);

  IplImage *binary_actual_motion =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);
  cvZero (binary_actual_motion);

  uint8_t *low_speed_pointer;
  uint8_t *low_speed_pointer_aux;
  uint8_t *high_speed_pointer;
  uint8_t *high_speed_pointer_aux;
  uint8_t *actual_motion_pointer;
  uint8_t *actual_motion_pointer_aux;
  uint8_t *binary_actual_motion_pointer;
  uint8_t *binary_actual_motion_pointer_aux;
  int w, h;

  /* Rasterize the configured ROIs into a mask */
  if (crowddetector->priv->num_rois != 0) {
    cvFillPoly (actual_image_masked, crowddetector->priv->curves,
        crowddetector->priv->n_points, crowddetector->priv->num_rois,
        cvScalar (255, 255, 255, 0), CV_AA, 0);
  }
  cvCvtColor (crowddetector->priv->actual_image, frame_actual_gray,
      CV_BGR2GRAY);
  kms_crowd_detector_mask_image (frame_actual_gray, actual_image_masked, 0);

  /* Running-average background model (the background image is expected to
   * have been allocated in kms_crowd_detector_initialize_images) */
  if (crowddetector->priv->background == NULL) {
    cvCopy (frame_actual_gray, crowddetector->priv->background, 0);
  } else {
    cvAddWeighted (crowddetector->priv->background, BACKGROUND_ADD_RATIO,
        frame_actual_gray, 1 - BACKGROUND_ADD_RATIO, 0,
        crowddetector->priv->background);
  }

  /* Spatial and temporal local binary patterns */
  kms_crowd_detector_compute_temporal_lbp (frame_actual_gray, actual_lbp,
      actual_lbp, FALSE);
  kms_crowd_detector_compute_temporal_lbp (frame_actual_gray,
      lbp_temporal_result, crowddetector->priv->frame_previous_gray, TRUE);
  cvAddWeighted (crowddetector->priv->previous_lbp, LBPS_ADD_RATIO,
      actual_lbp, (1 - LBPS_ADD_RATIO), 0, add_lbps_result);
  cvSub (crowddetector->priv->previous_lbp, actual_lbp, add_lbps_result, 0);
  cvThreshold (add_lbps_result, add_lbps_result, 70.0, 255.0, CV_THRESH_OTSU);
  cvNot (add_lbps_result, add_lbps_result);

  /* Morphological cleanup of the LBP difference mask */
  cvErode (add_lbps_result, add_lbps_result, 0, 4);
  cvDilate (add_lbps_result, add_lbps_result, 0, 11);
  cvErode (add_lbps_result, add_lbps_result, 0, 3);
  cvCvtColor (add_lbps_result, lbps_alpha_result_rgb, CV_GRAY2BGR);

  cvCopy (actual_lbp, crowddetector->priv->previous_lbp, 0);
  cvCopy (frame_actual_gray, crowddetector->priv->frame_previous_gray, 0);

  /* Accumulate the LBP masks over time to derive the high-speed map */
  if (crowddetector->priv->acumulated_lbp == NULL) {
    cvCopy (add_lbps_result, crowddetector->priv->acumulated_lbp, 0);
  } else {
    cvAddWeighted (crowddetector->priv->acumulated_lbp,
        TEMPORAL_LBPS_ADD_RATIO, add_lbps_result,
        1 - TEMPORAL_LBPS_ADD_RATIO, 0, crowddetector->priv->acumulated_lbp);
  }

  cvThreshold (crowddetector->priv->acumulated_lbp, high_speed_map,
      150.0, 255.0, CV_THRESH_BINARY);
  cvSmooth (high_speed_map, high_speed_map, CV_MEDIAN, 3, 0, 0.0, 0.0);

  /* Background subtraction followed by Canny; the edges are accumulated
   * over time to derive the low-speed map */
  kms_crowd_detector_substract_background (frame_actual_gray,
      crowddetector->priv->background, substract_background_to_actual);
  cvThreshold (substract_background_to_actual,
      substract_background_to_actual, 70.0, 255.0, CV_THRESH_OTSU);
  cvCanny (substract_background_to_actual,
      substract_background_to_actual, 70.0, 150.0, 3);

  if (crowddetector->priv->acumulated_edges == NULL) {
    cvCopy (substract_background_to_actual,
        crowddetector->priv->acumulated_edges, 0);
  } else {
    cvAddWeighted (crowddetector->priv->acumulated_edges, EDGES_ADD_RATIO,
        substract_background_to_actual, 1 - EDGES_ADD_RATIO, 0,
        crowddetector->priv->acumulated_edges);
  }

  kms_crowd_detector_process_edges_image (crowddetector, low_speed_map, 3);
  cvErode (low_speed_map, low_speed_map, 0, 1);

  /* Fuse the two speed maps into the motion overlay (BGR layout assumed:
   * channel 0 marks high-speed motion in blue, channel 2 low-speed in red) */
  low_speed_pointer = (uint8_t *) low_speed_map->imageData;
  high_speed_pointer = (uint8_t *) high_speed_map->imageData;
  actual_motion_pointer = (uint8_t *) actual_motion->imageData;
  binary_actual_motion_pointer = (uint8_t *) binary_actual_motion->imageData;

  for (h = 0; h < low_speed_map->height; h++) {
    low_speed_pointer_aux = low_speed_pointer;
    high_speed_pointer_aux = high_speed_pointer;
    actual_motion_pointer_aux = actual_motion_pointer;
    binary_actual_motion_pointer_aux = binary_actual_motion_pointer;

    for (w = 0; w < low_speed_map->width; w++) {
      if (*high_speed_pointer_aux == 0) {
        actual_motion_pointer_aux[0] = 255;
        binary_actual_motion_pointer_aux[0] = 255;
      }
      if (*low_speed_pointer_aux == 255) {
        *actual_motion_pointer_aux = 0;
        actual_motion_pointer_aux[2] = 255;
        binary_actual_motion_pointer_aux[0] = 255;
      } else if (*high_speed_pointer_aux == 0) {
        actual_motion_pointer_aux[0] = 255;
      }
      low_speed_pointer_aux++;
      high_speed_pointer_aux++;
      actual_motion_pointer_aux = actual_motion_pointer_aux + 3;
      binary_actual_motion_pointer_aux++;
    }
    low_speed_pointer += low_speed_map->widthStep;
    high_speed_pointer += high_speed_map->widthStep;
    actual_motion_pointer += actual_motion->widthStep;
    binary_actual_motion_pointer += binary_actual_motion->widthStep;
  }

  /* Optical flow per ROI, restricted to the ROI's bounding box */
  int curve;

  for (curve = 0; curve < crowddetector->priv->num_rois; curve++) {
    if (crowddetector->priv->rois_data[curve].send_optical_flow_event == TRUE) {
      CvRect container =
          kms_crowd_detector_get_square_roi_contaniner (crowddetector, curve);

      cvSetImageROI (crowddetector->priv->actual_image, container);
      cvSetImageROI (crowddetector->priv->previous_image, container);
      cvSetImageROI (actual_motion, container);
      kms_crowd_detector_compute_optical_flow (crowddetector,
          binary_actual_motion, container, curve);
      cvResetImageROI (crowddetector->priv->actual_image);
      cvResetImageROI (crowddetector->priv->previous_image);
      cvResetImageROI (actual_motion);  /* reset was missing in the original */
    }
  }

  /* Blend the motion overlay into the output frame */
  {
    uint8_t *orig_row_pointer =
        (uint8_t *) crowddetector->priv->actual_image->imageData;
    uint8_t *overlay_row_pointer = (uint8_t *) actual_motion->imageData;

    for (h = 0; h < crowddetector->priv->actual_image->height; h++) {
      uint8_t *orig_column_pointer = orig_row_pointer;
      uint8_t *overlay_column_pointer = overlay_row_pointer;

      for (w = 0; w < crowddetector->priv->actual_image->width; w++) {
        int c;

        for (c = 0; c < crowddetector->priv->actual_image->nChannels; c++) {
          if (overlay_column_pointer[c] != 0) {
            orig_column_pointer[c] = overlay_column_pointer[c];
          }
        }
        orig_column_pointer += crowddetector->priv->actual_image->nChannels;
        overlay_column_pointer += actual_motion->nChannels;
      }
      orig_row_pointer += crowddetector->priv->actual_image->widthStep;
      overlay_row_pointer += actual_motion->widthStep;
    }
  }

  /* Draw the ROI outlines on the output frame */
  if (crowddetector->priv->num_rois != 0) {
    cvPolyLine (crowddetector->priv->actual_image,
        crowddetector->priv->curves, crowddetector->priv->n_points,
        crowddetector->priv->num_rois, 1, cvScalar (255, 255, 255, 0),
        1, 8, 0);
  }

  cvNot (high_speed_map, high_speed_map);
  kms_crowd_detector_roi_analysis (crowddetector, low_speed_map,
      high_speed_map);

  cvReleaseImage (&frame_actual_gray);
  cvReleaseImage (&actual_lbp);
  cvReleaseImage (&lbp_temporal_result);
  cvReleaseImage (&add_lbps_result);
  cvReleaseImage (&lbps_alpha_result_rgb);
  cvReleaseImage (&actual_image_masked);
  cvReleaseImage (&substract_background_to_actual);
  cvReleaseImage (&low_speed_map);
  cvReleaseImage (&high_speed_map);
  cvReleaseImage (&actual_motion);
  cvReleaseImage (&binary_actual_motion);

  gst_buffer_unmap (frame->buffer, &info);

  return GST_FLOW_OK;
}
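The background model in the function above is an exponential running average. Isolated as a standalone sketch; the ratio value below is an assumption, the real constant is defined elsewhere in the element:

/* background = r * background + (1 - r) * frame, with r close to 1 so the
 * model adapts slowly and moving people do not burn into it */
#define BACKGROUND_ADD_RATIO 0.98       /* assumed value */

static void
update_background (IplImage * background, IplImage * frame_gray)
{
  cvAddWeighted (background, BACKGROUND_ADD_RATIO,
      frame_gray, 1 - BACKGROUND_ADD_RATIO, 0, background);
}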
CvSeq* findSquares4(IplImage *img, CvMemStorage* storage)
{
    CvSeq* contours;
    int i, c, l, N = 11;
    int thresh = 50;
    CvSize sz = cvSize(img->width & -2, img->height & -2);
    IplImage* timg = cvCloneImage(img);
    IplImage* gray = cvCreateImage(sz, 8, 1);
    IplImage* pyr = cvCreateImage(cvSize(sz.width / 2, sz.height / 2), 8, 3);
    IplImage* tgray;
    CvSeq* result;

    // Create an empty sequence to store the contour corner points
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

    cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));

    // Noise filtering (the pyramid round trip is disabled in this version)
    //cvPyrDown(timg, pyr, 7);
    tgray = cvCreateImage(sz, 8, 1);
    //cvPyrUp(pyr, timg, 7);

    // Extract the red, green, and blue planes in turn
    for (c = 0; c < 3; c++) {
        cvSetImageCOI(timg, c + 1);
        cvCopy(timg, tgray, 0);

        // Try several threshold levels
        for (l = 0; l < N; l++) {
            if (l == 0) {
                cvCanny(tgray, gray, 0, thresh, 5);
                cvDilate(gray, gray, 0, 1);
            } else {
                cvThreshold(tgray, gray, (l + 1) * 255 / N, 255, CV_THRESH_BINARY);
            }

            // Find the contours and store them all as a list
            cvFindContours(gray, storage, &contours, sizeof(CvContour),
                           CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

            // Walk every contour
            while (contours) {
                // Approximate the polygonal curve with the specified accuracy
                result = cvApproxPoly(contours, sizeof(CvContour), storage,
                                      CV_POLY_APPROX_DP,
                                      cvContourPerimeter(contours) * 0.02, 0);
                if (result->total == 4
                        && fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 500
                        && fabs(cvContourArea(result, CV_WHOLE_SEQ)) < 100000
                        && cvCheckContourConvexity(result)) {
                    double s = 0, t;
                    for (i = 0; i < 5; i++) {
                        if (i >= 2) {
                            t = fabs(angle(
                                    (CvPoint*) cvGetSeqElem(result, i),
                                    (CvPoint*) cvGetSeqElem(result, i - 2),
                                    (CvPoint*) cvGetSeqElem(result, i - 1)));
                            s = s > t ? s : t;
                        }
                    }
                    // If the largest cosine is small enough, every corner is
                    // close to 90 degrees, i.e. a right angle
                    if (s < 0.08) {
                        for (i = 0; i < 4; i++) {
                            cvSeqPush(squares, (CvPoint*) cvGetSeqElem(result, i));
                        }
                    }
                }
                contours = contours->h_next;
            }
        }
    }

    cvReleaseImage(&gray);
    cvReleaseImage(&pyr);
    cvReleaseImage(&tgray);
    cvReleaseImage(&timg);
    return squares;
}
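A minimal driver for this variant, assuming the angle() helper shown earlier; a sketch only, not part of the original:

#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <cstdio>

int main(int argc, char* argv[])
{
    if (argc < 2) {
        printf("Usage: %s <image>\n", argv[0]);
        return -1;
    }
    IplImage* img = cvLoadImage(argv[1], 1);      // 3-channel BGR expected
    if (!img) return -1;

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* squares = findSquares4(img, storage);
    printf("detected %d square(s)\n", squares->total / 4);  // 4 vertices each

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&img);
    return 0;
}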
/**************************************
 * Definition: Finds squares in an image with the given minimum size
 *             (Taken from the API and modified slightly)
 *             Doesn't require exactly 4 sides, convexity, or near-90-degree
 *             angles ('findBlobs')
 *
 * Parameters: the image to find squares in and the minimum area for a square
 *
 * Returns:    a squares_t linked list
 **************************************/
squares_t* Camera::findSquares(IplImage *img, int areaThreshold)
{
    CvSeq* contours;
    CvMemStorage *storage;
    int i, j, area;
    CvPoint ul, lr, pt, centroid;
    CvSize sz = cvSize( img->width, img->height);
    IplImage * canny = cvCreateImage(sz, 8, 1);
    squares_t *sq_head, *sq, *sq_last;
    CvSeqReader reader;

    // Create storage
    storage = cvCreateMemStorage(0);

    // Pyramid image for blurring the result
    IplImage* pyr = cvCreateImage(cvSize(sz.width/2, sz.height/2), 8, 1);
    CvSeq* result;
    double s, t;

    // Create an empty sequence that will contain the squares' vertices
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

    // Select the maximum ROI in the image with the width and height divisible by 2
    cvSetImageROI(img, cvRect(0, 0, sz.width, sz.height));

    // Down- and up-scale the image to reduce noise
    cvPyrDown( img, pyr, CV_GAUSSIAN_5x5 );
    cvPyrUp( pyr, img, CV_GAUSSIAN_5x5 );

    // Apply the Canny edge detector with the lower threshold set to 0
    // (which forces edge merging)
    cvCanny(img, canny, 0, 50, 3);

    // Dilate the Canny output to close potential holes between edge segments
    cvDilate(canny, canny, 0, 2);

    // Find the contours and store them all as a list (was CV_RETR_EXTERNAL)
    cvFindContours(canny, storage, &contours, sizeof(CvContour), CV_RETR_LIST,
                   CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

    // Test each contour to find squares
    while (contours) {
        // Approximate a contour with accuracy proportional to its perimeter
        result = cvApproxPoly(contours, sizeof(CvContour), storage,
                              CV_POLY_APPROX_DP,
                              cvContourPerimeter(contours)*0.1, 0 );

        // Note: the absolute value of the area is used because the area may
        // be positive or negative, in accordance with the contour orientation
        if (result->total >= 4 &&
            fabs(cvContourArea(result,CV_WHOLE_SEQ,0)) > areaThreshold) {
            s = 0;
            for (i = 0; i < 5; i++) {
                // Find the minimum angle between joint edges (maximum of cosine)
                if (i >= 2) {
                    t = fabs(ri_angle((CvPoint*)cvGetSeqElem(result, i),
                                      (CvPoint*)cvGetSeqElem(result, i-2),
                                      (CvPoint*)cvGetSeqElem(result, i-1)));
                    s = s > t ? s : t;
                }
            }
            // The cosine 's' is computed but, per the header comment, not
            // tested here: this variant accepts any contour with at least 4
            // vertices and stores only its first 4 vertices
            for (i = 0; i < 4; i++) {
                cvSeqPush(squares, (CvPoint*)cvGetSeqElem(result, i));
            }
        }
        // Get the next contour
        contours = contours->h_next;
    }

    // Initialize a reader of the sequence
    cvStartReadSeq(squares, &reader, 0);
    sq_head = NULL;
    sq_last = NULL;
    sq = NULL;

    // We now have a list of square contours; find the centroid and area of each
    for (i = 0; i < squares->total; i += 4) {
        // Find the upper-left and lower-right coordinates
        // (the 1000 sentinels assume the image is smaller than 1000x1000)
        ul.x = 1000; ul.y = 1000;
        lr.x = 0; lr.y = 0;
        for (j = 0; j < 4; j++) {
            CV_READ_SEQ_ELEM(pt, reader);
            // Upper left
            if (pt.x < ul.x) ul.x = pt.x;
            if (pt.y < ul.y) ul.y = pt.y;
            // Lower right
            if (pt.x > lr.x) lr.x = pt.x;
            if (pt.y > lr.y) lr.y = pt.y;
        }
        // Find the centroid
        centroid.x = ((lr.x - ul.x) / 2) + ul.x;
        centroid.y = ((lr.y - ul.y) / 2) + ul.y;
        // Find the area
        area = (lr.x - ul.x) * (lr.y - ul.y);

        // Append a node to the list
        sq = new squares_t;
        sq->area = area;
        sq->center.x = centroid.x;
        sq->center.y = centroid.y;
        sq->next = NULL;
        if (sq_last == NULL)
            sq_head = sq;
        else
            sq_last->next = sq;
        sq_last = sq;
    }

    // Release the temporary images and data
    cvReleaseImage(&canny);
    cvReleaseImage(&pyr);
    cvReleaseMemStorage(&storage);
    return sq_head;
}
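Camera::findSquares() hands back a linked list allocated with new, so the caller owns the nodes. A matching cleanup helper, sketched here as an assumption (the original class may free the list elsewhere):

// Walk a squares_t list produced by Camera::findSquares() and free every node.
static void freeSquares(squares_t *sq)
{
    while (sq != NULL) {
        squares_t *next = sq->next;
        delete sq;
        sq = next;
    }
}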