IplImage *process(IplImage **_img)
{
    fprintf(stderr, "Processing image:\n");
    IplImage *img = *_img;

    /* Convert to HSV */
    print_time("Converting to HSV");
    CvSize size = cvGetSize(img);
    IplImage *hsv = cvCreateImage(size, IPL_DEPTH_8U, 3);
    cvCvtColor(img, hsv, CV_BGR2HSV);

    /* Generate mask */
    CvMat *mask = cvCreateMat(size.height, size.width, CV_8UC1);
    //cvInRangeS(hsv, cvScalar(0.11*256, 0.60*256, 0.20*256, 0), cvScalar(0.14*256, 1.00*256, 1.00*256, 0), mask);
    cvInRangeS(hsv, cvScalar(0, 0.6*256, 0.6*256, 0), cvScalar(0.21*256, 256, 256, 0), mask);
    cvReleaseImage(&hsv);

    /* Perform morphological ops */
    print_time("Performing morphologies");
    IplConvKernel *se21 = cvCreateStructuringElementEx(21, 21, 10, 10, CV_SHAPE_RECT, NULL);
    IplConvKernel *se11 = cvCreateStructuringElementEx(11, 11, 5, 5, CV_SHAPE_RECT, NULL);
    cvClose(mask, mask, se21);
    cvOpen(mask, mask, se11);
    cvReleaseStructuringElement(&se21);
    cvReleaseStructuringElement(&se11);

    /* Hough transform */
    IplImage *hough_in = cvCreateImage(size, 8, 1);
    cvCopy(mask, hough_in, NULL);

    /* Scan for the first masked pixel and report its coordinates */
    int rows = size.height;
    int cols = size.width;
    int j, k;
    int breakflag = 0;
    for (j = 0; j < rows; j++) {
        for (k = 0; k < cols; k++) {
            CvScalar val = cvGet2D(hough_in, j, k);
            if (val.val[0] == 255) {
                sprintf(dat, "%d-%d:", k, j);
                int rc = serialport_write(fd, dat);
                if (rc == -1)
                    return NULL;
                fprintf(fp, "%d %d\n", k, j);
                breakflag = 1;
                break;
            }
        }
        if (breakflag)
            break;
    }
    return hough_in;
}
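/*
 * Note: cvClose and cvOpen above are not part of the stock OpenCV C API, so
 * process() presumably relies on project-local helpers. A minimal sketch of
 * what such wrappers could look like, built from cvDilate/cvErode (the names
 * and in-place usage mirror the call sites above; this is an assumption, not
 * the original project's code):
 */
static void cvOpen(const CvArr *src, CvArr *dst, IplConvKernel *element)
{
    cvErode(src, dst, element, 1);   /* opening = erosion followed by dilation */
    cvDilate(dst, dst, element, 1);
}

static void cvClose(const CvArr *src, CvArr *dst, IplConvKernel *element)
{
    cvDilate(src, dst, element, 1);  /* closing = dilation followed by erosion */
    cvErode(dst, dst, element, 1);
}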
static GstFlowReturn gst_skin_transform_ip(GstBaseTransform *btrans, GstBuffer *gstbuf)
{
    GstSkin *skin = GST_SKIN(btrans);
    GST_SKIN_LOCK(skin);

    //////////////////////////////////////////////////////////////////////////////
    // Image preprocessing: colour space conversion etc.
    // Get the image data from the input, which is BGR/RGB.
    skin->cvRGBA->imageData = (char *)GST_BUFFER_DATA(gstbuf);
    cvCvtColor(skin->cvRGBA, skin->cvRGB, CV_BGRA2BGR);

    //////////////////////////////////////////////////////////////////////////////
    // Here goes the business logic
    //////////////////////////////////////////////////////////////////////////////
    ///////////// SKIN COLOUR BLOB FACE DETECTION //////////////////////////////////
    //////////////////////////////////////////////////////////////////////////////
    if (skin->enableskin) {
        int display = 1;
        if (METHOD_HSV == skin->method) {        // HSV
            gstskin_find_skin_center_of_mass(skin, display);
        } else if (METHOD_RGB == skin->method) { // RGB
            gstskin_find_skin_center_of_mass2(skin, display);
        }
    }

    //////////////////////////////////////////////////////////////////////////////
    // After this we have an RGB black-and-white image with the skin in skin->cvRGB.
    // Just copy one channel of the RGB skin (which only holds values 255 or 0)
    // and save it for later.
    cvSplit(skin->cvRGB, skin->chA, NULL, NULL, NULL);

    // Create the 3x3 structuring element once and release it after use.
    IplConvKernel *se3 = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
    cvErode(skin->chA, skin->chA, se3, 1);
    cvDilate(skin->chA, skin->chA, se3, 2);
    cvErode(skin->chA, skin->chA, se3, 1);
    cvReleaseStructuringElement(&se3);

    // Copy the skin output to the alpha channel in the output image.
    cvSplit(skin->cvRGBA, skin->ch1, skin->ch2, skin->ch3, NULL);
    cvMerge(skin->ch1, skin->ch2, skin->ch3, skin->chA, skin->cvRGBA);

    //////////////////////////////////////////////////////////////////////////////
    // If we want to display, just overwrite the output.
    if (skin->display) {
        cvCvtColor(skin->chA, skin->cvRGBA, CV_GRAY2RGB);
    }

    GST_SKIN_UNLOCK(skin);
    return GST_FLOW_OK;
}
static gboolean gst_motiondetect_apply(IplImage *cvReferenceImage,
                                       const IplImage *cvCurrentImage,
                                       const IplImage *cvMaskImage,
                                       float noiseThreshold)
{
    IplConvKernel *kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_ELLIPSE, NULL);
    int threshold = (int)((1 - noiseThreshold) * 255);
    IplImage *cvAbsDiffImage = cvReferenceImage; /* reuse the reference image as scratch space */
    double maxVal = -1.0;

    cvAbsDiff(cvReferenceImage, cvCurrentImage, cvAbsDiffImage);
    cvThreshold(cvAbsDiffImage, cvAbsDiffImage, threshold, 255, CV_THRESH_BINARY);
    cvErode(cvAbsDiffImage, cvAbsDiffImage, kernel, 1);
    cvReleaseStructuringElement(&kernel);
    cvMinMaxLoc(cvAbsDiffImage, NULL, &maxVal, NULL, NULL, cvMaskImage);

    if (maxVal > 0) {
        return TRUE;
    } else {
        return FALSE;
    }
}
/*!
 * @function close
 * @discussion Perform image closing with a custom kernel.
 * @updated 2011-4-13
 */
char *close(IplImage *frameImage)
{
    // Select the output size based on the capture dimensions.
    switch (captureSize) {
        case SMALL_BACK:
        case SMALL_FRONT:
            convertedImage = cvCreateImage(cvSize(192, 144), IPL_DEPTH_8U, 4);
            break;
        case MEDIUM_BACK:
        case LARGE_FRONT:
        case MEDIUM_FRONT:
            convertedImage = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 4);
            break;
        case LARGE_BACK:
            convertedImage = cvCreateImage(cvSize(1280, 720), IPL_DEPTH_8U, 4);
            break;
    }
    cvCopy(frameImage, convertedImage, 0);

    IplConvKernel *closeKernel = cvCreateStructuringElementEx(7, 7, 3, 3, CV_SHAPE_RECT, NULL);
    // The default number of iterations is 1; run a few to make the effect more pronounced.
    cvMorphologyEx(convertedImage, convertedImage, NULL, closeKernel, CV_MOP_CLOSE, 3);
    cvReleaseStructuringElement(&closeKernel);

    return convertedImage->imageDataOrigin;
}
//
// function "noiseRemoval":
// apply the open morphology operation to the colour-segmentation image
//
IplImage *noiseRemoval(IplImage *inputImage)
{
    int iWidth = inputImage->width;
    int iHeight = inputImage->height;

    IplImage *imageNoiseRem = cvCreateImage(cvSize(iWidth, iHeight), IPL_DEPTH_8U, 1);
    if (!imageNoiseRem)
        exit(EXIT_FAILURE);

    IplConvKernel *structureEle1 = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_ELLIPSE, 0);
    int operationType[2] = { CV_MOP_OPEN, CV_MOP_CLOSE };

    // Note: open/close (and erode/dilate) appear to behave in reverse here.
    cvMorphologyEx(inputImage, imageNoiseRem, NULL, structureEle1, operationType[0], 1);
    //cvMorphologyEx(inputImage, imageNoiseRem, NULL, structureEle1, operationType[1], 1);

    // In order to reconnect regions broken by the morphology operation above:
    //cvErode(imageNoiseRem, imageNoiseRem, structureEle1, 1);

    cvReleaseStructuringElement(&structureEle1);
    return imageNoiseRem;
}
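/*
 * For reference: in OpenCV's C API, CV_MOP_OPEN is an erosion followed by a
 * dilation and CV_MOP_CLOSE is a dilation followed by an erosion, which is why
 * they can appear "reversed" when the foreground is dark instead of bright.
 * A small self-contained check (a sketch; the input file name is an assumption):
 */
#include <stdio.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage *src = cvLoadImage("binary_mask.png", CV_LOAD_IMAGE_GRAYSCALE); /* assumed test image */
    if (!src)
        return 1;

    IplImage *opened = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *manual = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *diff   = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplConvKernel *se = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_ELLIPSE, NULL);

    cvMorphologyEx(src, opened, NULL, se, CV_MOP_OPEN, 1); /* built-in opening */
    cvErode(src, manual, se, 1);                           /* manual opening: erode ... */
    cvDilate(manual, manual, se, 1);                       /* ... then dilate */

    cvAbsDiff(opened, manual, diff);
    printf("differing pixels: %d\n", cvCountNonZero(diff)); /* expected: 0 */

    cvReleaseStructuringElement(&se);
    cvReleaseImage(&src);
    cvReleaseImage(&opened);
    cvReleaseImage(&manual);
    cvReleaseImage(&diff);
    return 0;
}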
void test_min_max_edge_detection(){
    tester->start_timer();
    float max_value;
    float min_value;

    IplImage * max_image = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    IplImage * min_image = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);

    int values[] = {0,0,0,0,0,0,0,0,0};
    IplConvKernel * kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT,values);
    cvErode(image, min_image, kernel);
    cvDilate(image, max_image, kernel);
    //display_image("erode",min_image);
    //display_image("dilate",max_image);

    int kernel_radius = 1;
    //test_get_normalization_parameters(image,min_image,max_image,kernel_radius,max_value,min_value);

    IplImage * rimage = cvCloneImage(image);
    for(int y=0; y<image->height; y++){
        for(int x=0; x<image->width; x++){
            //int value = get_pixel(image,y,x);
            //printf("%d \n",value);
            float enhanced_value = test_min_max_enhancement(y, x, kernel_radius, image, min_image, max_image);
            //int new_value = 255 * enhanced_value/max_value;
            int new_value = enhanced_value;
            set_pixel(rimage, y, x, new_value);
        }
    }
    tester->stop_timer();
    display_image("result", rimage);
    cvSaveImage("images/edges.png", rimage);
}
regionDetector::regionDetector(CvSize size)
    : NONE(0), LEFT(1), RIGHT(2), TOP(3), BOTTOM(4), WHITE(255)
{
    original = 0;
    threshold = 20;
    element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
    result = cvCreateImage(size, IPL_DEPTH_8U, 1);
}
void BlobTracking::process(const cv::Mat &img_input, const cv::Mat &img_mask, cv::Mat &img_output)
{
    if(img_input.empty() || img_mask.empty())
        return;

    loadConfig();
    if(firstTime)
        saveConfig();

    IplImage* frame = new IplImage(img_input);
    cvConvertScale(frame, frame, 1, 0);

    IplImage* segmentated = new IplImage(img_mask);

    IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
    cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

    if(showBlobMask)
        cvShowImage("Blob Mask", segmentated);

    IplImage* labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

    cvb::CvBlobs blobs;
    unsigned int result = cvb::cvLabel(segmentated, labelImg, blobs);
    //cvb::cvFilterByArea(blobs, 500, 1000000);
    cvb::cvFilterByArea(blobs, minArea, maxArea);

    //cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
    if(debugBlob)
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame,
                           CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|
                           CV_BLOB_RENDER_ANGLE|CV_BLOB_RENDER_TO_STD);
    else
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame,
                           CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);

    cvb::cvUpdateTracks(blobs, tracks, 200., 5);

    if(debugTrack)
        cvb::cvRenderTracks(tracks, frame, frame,
                            CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_STD);
    else
        cvb::cvRenderTracks(tracks, frame, frame,
                            CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);

    //std::map<CvID, CvTrack *> CvTracks
    if(showOutput)
        cvShowImage("Blob Tracking", frame);

    cv::Mat img_result(frame);
    img_result.copyTo(img_output);

    //cvReleaseImage(&frame);
    //cvReleaseImage(&segmentated);
    cvReleaseImage(&labelImg);
    delete frame;
    delete segmentated;

    cvReleaseBlobs(blobs);
    cvReleaseStructuringElement(&morphKernel);

    firstTime = false;
}
void test_backremoval(){
    float max_value;
    float min_value;

    IplImage * rimage = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    IplImage * min_image = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);

    int values[] = {0,0,0,0,0,0,0,0,0};
    IplConvKernel * kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT,values);

    cvDilate(image, rimage, kernel);
    display_image("dilate", rimage);
    cvErode(rimage, rimage, kernel);
    display_image("erode", rimage);
    //cvDilate(max_image,max_image,kernel);
    //cvDilate(max_image,max_image,kernel);

    display_image("original", image);
    display_image("rimage", rimage);

    //rimage = test_background_subtraction(image,max_image);
    //display_image("back removed",rimage);
    //IplImage * otsu_image = otsu_algorithm(rimage);
    //otsu_image = invert_image(otsu_image);
    //display_image("otsu image",otsu_image);
}
IplImage *contoursGetOutlineMorh(IplImage *src, IplImage *temp, int mask)
{
    int radius = 3;
    int cols = radius * 2 + 1;
    int rows = cols;
    IplImage *res;

    IplImage *bin = cvCreateImage(cvGetSize(src), src->depth, 1);
    cvAdaptiveThreshold(src, bin, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 1);

    if (mask == 1) {
        IplImage *maskImg = cvCreateImage(cvGetSize(src), src->depth, 1);
        res = cvCreateImage(cvGetSize(src), src->depth, 1);
        cvThreshold(src, maskImg, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
        cvOr(bin, maskImg, res, NULL);
        cvReleaseImage(&maskImg);
        cvReleaseImage(&bin);
    } else {
        res = bin;
    }

    IplConvKernel *element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvMorphologyEx(res, res, temp, element, CV_MOP_OPEN, 1);
    cvReleaseStructuringElement(&element);

    radius = 9;
    cols = radius * 2 + 1;
    rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvMorphologyEx(res, res, temp, element, CV_MOP_CLOSE, 1);
    cvReleaseStructuringElement(&element);

    radius = 7;
    cols = radius * 2 + 1;
    rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvErode(res, res, element, 1);
    cvDilate(res, res, element, 1);
    contoursDrawBorder(res);
    cvReleaseStructuringElement(&element);
    cvReleaseImage(&temp);

    return res;
}
// Dilate -> Erode -> Dilate
void Filterling::noiseEraser(IplImage *srcImage, IplImage *dstImage)
{
    int COL = 3, ROW = 3, ITERATIONS = 1;
    IplConvKernel *elem = cvCreateStructuringElementEx(COL, ROW, 0, 0, CV_SHAPE_RECT, NULL);

    //cvMorphologyEx(img, img, NULL, elem, CV_MOP_CLOSE, 1); // closing
    cvDilate(srcImage, dstImage, elem, ITERATIONS);      // dilation
    cvErode(dstImage, dstImage, elem, ITERATIONS * 2);   // erosion
    cvDilate(dstImage, dstImage, elem, ITERATIONS);      // dilation

    cvReleaseStructuringElement(&elem);
}
IplImage *closeImage(IplImage *source)
{
    int radius = 3;
    IplConvKernel *Kern = cvCreateStructuringElementEx(radius * 2 + 1, radius * 2 + 1,
                                                       radius, radius, CV_SHAPE_RECT, NULL);
    // Note: erode followed by dilate is a morphological opening, despite the function's name.
    cvErode(source, source, Kern, 1);
    cvDilate(source, source, Kern, 1);
    cvReleaseStructuringElement(&Kern);
    return source;
}
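/*
 * Sketch: if a true morphological closing is wanted, the order is
 * dilate-then-erode. This variant is an assumption for illustration,
 * not the original author's code.
 */
IplImage *closeImageStrict(IplImage *source)
{
    int radius = 3;
    IplConvKernel *Kern = cvCreateStructuringElementEx(radius * 2 + 1, radius * 2 + 1,
                                                       radius, radius, CV_SHAPE_RECT, NULL);
    cvDilate(source, source, Kern, 1); /* fill small gaps */
    cvErode(source, source, Kern, 1);  /* restore object size */
    cvReleaseStructuringElement(&Kern);
    return source;
}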
/*
 * Transform the image into a two-coloured image: one colour for the colour we want to track,
 * another for all other colours.
 * From this image we extract two pieces of data: the number of pixels detected,
 * and the centre of gravity of those pixels.
 */
CvPoint binarisation(IplImage *image, int *nbPixels)
{
    int x, y;
    CvScalar pixel;
    IplImage *hsv, *mask;
    IplConvKernel *kernel;
    int sommeX = 0, sommeY = 0;
    *nbPixels = 0;

    // Create the mask & initialize it to white (no colour detected)
    mask = cvCreateImage(cvGetSize(image), image->depth, 1);

    // Create the HSV image
    hsv = cvCloneImage(image);
    cvCvtColor(image, hsv, CV_BGR2HSV);
    cvShowImage("GeckoGeek Color Rectification", hsv);

    // Create the mask
    cvInRangeS(hsv, cvScalar(h - tolerance - 1, s - tolerance, 0),
               cvScalar(h + tolerance - 1, s + tolerance, 255), mask);

    // Create the kernel for the morphological operations
    kernel = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);

    // Morphological opening (inverted because we have white pixels on a black background)
    cvDilate(mask, mask, kernel, 1);
    cvErode(mask, mask, kernel, 1);

    // Go through the mask to look for the tracked object and compute its centre of gravity
    for (x = 0; x < mask->width; x++) {
        for (y = 0; y < mask->height; y++) {
            // If it's a tracked pixel, add it to the centre-of-gravity calculation
            if (((uchar *)(mask->imageData + y * mask->widthStep))[x] == 255) {
                sommeX += x;
                sommeY += y;
                (*nbPixels)++;
            }
        }
    }

    // Show the resulting mask image
    cvShowImage("GeckoGeek Mask", mask);

    // Release the kernel
    cvReleaseStructuringElement(&kernel);
    // Release the mask
    cvReleaseImage(&mask);
    // Release the HSV image
    cvReleaseImage(&hsv);

    // If there is no pixel, return a centre outside the image; otherwise return the centre of gravity
    if (*nbPixels > 0)
        return cvPoint((int)(sommeX / (*nbPixels)), (int)(sommeY / (*nbPixels)));
    else
        return cvPoint(-1, -1);
}
void TamatarVision::update()
{
    vidGrabber.grabFrame();
    if (vidGrabber.isFrameNew()) {
        // load image from the video grabber
        colorImg.setFromPixels(vidGrabber.getPixels(), camWidth, camHeight);

        // convert to grayscale
        cvCvtColor(colorImg.getCvImage(), grayImg.getCvImage(), CV_RGB2GRAY);
        grayImg.flagImageChanged();

        // equalize histogram
        if (doHistEqualize) {
            cvEqualizeHist(grayImg.getCvImage(), grayImg.getCvImage());
        }

        // morphological opening
        if (doMorphEx) {
            int anchor = morphExRadius / 2;
            structure = cvCreateStructuringElementEx(morphExRadius, morphExRadius, anchor, anchor, CV_SHAPE_ELLIPSE, NULL);
            cvCopy(grayImg.getCvImage(), grayImg2.getCvImage());
            cvMorphologyEx(grayImg2.getCvImage(), grayImg.getCvImage(), NULL, structure, CV_MOP_OPEN);
        }

        if (doSmoothing) {
            //grayImg2 = grayImg;
            //smoothSigmaColor = 20;
            //smoothSigmaSpatial = 20;
            //cvSmooth(grayImg2.getCvImage(), grayImg.getCvImage(), CV_BILATERAL, 9, 9, smoothSigmaColor, smoothSigmaSpatial);
            cvSmooth(grayImg.getCvImage(), grayImg.getCvImage(), CV_GAUSSIAN, 3, 3, 2, 2);
        }

        //grayImg.threshold(120);

        // threshold
        if (doThreshold) {
            //grayImg.threshold(threshold);
            grayImg2 = grayImg;
            cvThreshold(grayImg2.getCvImage(), grayImg.getCvImage(), threshold, thresholdMax, CV_THRESH_TOZERO);
            //cvAdaptiveThreshold(grayImg2.getCvImage(), grayImg.getCvImage(), threshold, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_, 3, 5);
        }

        if (doCanny) {
            cvCanny(grayImg.getCvImage(), grayImg.getCvImage(), cannyThres1, cannyThres2, 3);
        }
        //cvCanny(grayImg.getCvImage(), grayImg.getCvImage(), 120, 180, 3);
        //cvSobel(grayImg.getCvImage(), grayImg.getCvImage(), 1, 1, 3);

        if (doCircles) {
            CvMemStorage* storage = cvCreateMemStorage(0);
            circles = cvHoughCircles(grayImg.getCvImage(), storage, CV_HOUGH_GRADIENT, 2, grayImg.getHeight() / 4,
                                     circleEdgeThres, circleAccThres, circleMinRadius, circleMaxRadius);
        }

        if (doContours) {
            contourFinder.findContours(grayImg, 10, (camWidth * camHeight) / 2, 20, false, true);
        }
    }
}
/**
 * Initialize images, memory, and windows
 */
void init()
{
    char* msg[] = {
        "Blink Detection 1.0",
        "Copyright (c) 2009",
        "http://nashruddin.com",
        "Press 'q' to quit...",
        "Press 'r' to restart...",
        "Have fun!"
    };
    int delay, i;

    capture = cvCaptureFromCAM(0);
    if (!capture)
        exit_nicely("Cannot initialize camera!");

    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);

    frame = cvQueryFrame(capture);
    if (!frame)
        exit_nicely("cannot query frame!");

    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.4, 0.4, 0, 1, 8);
    cvNamedWindow(wnd_name, 1);

    for (delay = 20, i = 0; i < 6; i++, delay = 20)
        while (delay) {
            frame = cvQueryFrame(capture);
            if (!frame)
                exit_nicely("cannot query frame!");
            DRAW_TEXT(frame, msg[i], delay, 0);
            cvShowImage(wnd_name, frame);
            cvWaitKey(30);
        }

    storage = cvCreateMemStorage(0);
    if (!storage)
        exit_nicely("cannot allocate memory storage!");

    kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
    gray   = cvCreateImage(cvGetSize(frame), 8, 1);
    prev   = cvCreateImage(cvGetSize(frame), 8, 1);
    diff   = cvCreateImage(cvGetSize(frame), 8, 1);
    tpl    = cvCreateImage(cvSize(TPL_WIDTH, TPL_HEIGHT), 8, 1);

    if (!kernel || !gray || !prev || !diff || !tpl)
        exit_nicely("system error.");

    gray->origin = frame->origin;
    prev->origin = frame->origin;
    diff->origin = frame->origin;

    cvNamedWindow(wnd_debug, 1);
}
Blink::Blink()
{
    leftEye = NULL;
    rightEye = NULL;
    prev = NULL;
    curr = NULL;
    kernel = cvCreateStructuringElementEx(7, 7, 1, 1, CV_SHAPE_CROSS, NULL); //3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
    leftEyeTracker = NULL;
    rightEyeTracker = NULL;
    oriImage = NULL;
}
int main(int argc, const char *argv[])
{
    IplImage *redLight   = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
    IplImage *greenLight = cvLoadImage(argv[2], CV_LOAD_IMAGE_COLOR);

    CvSize sz = cvGetSize(redLight);
    IplImage *redScreened = cvCreateImage(sz, redLight->depth, redLight->nChannels);
    IplImage *redErode    = cvCreateImage(sz, redLight->depth, redLight->nChannels);
    IplImage *redDilate   = cvCreateImage(sz, redLight->depth, redLight->nChannels);

    ColorScreening(redLight, redScreened);
    cvDilate(redScreened, redDilate, cvCreateStructuringElementEx(13, 13, 6, 6, CV_SHAPE_RECT, NULL), 1);
    cvErode(redDilate, redErode, cvCreateStructuringElementEx(13, 13, 6, 6, CV_SHAPE_RECT, NULL), 1);
    //cvDilate(redScreened, redDilate, NULL, 1);
    //cvErode(redDilate, redErode, NULL, 1);

    cvNamedWindow("Light Detector", CV_WINDOW_AUTOSIZE);
    cvShowImage("Light Detector", redLight);
    cvWaitKey(0);
    cvShowImage("Light Detector", redScreened);
    cvWaitKey(0);
    cvShowImage("Light Detector", redDilate);
    cvWaitKey(0);
    cvShowImage("Light Detector", redErode);
    cvWaitKey(0);
    cvShowImage("Light Detector", greenLight);
    cvWaitKey(0);

    ColorScreening(greenLight, redScreened);
    cvDilate(redScreened, redDilate, cvCreateStructuringElementEx(13, 13, 6, 6, CV_SHAPE_RECT, NULL), 1);
    cvErode(redDilate, redErode, cvCreateStructuringElementEx(13, 13, 6, 6, CV_SHAPE_RECT, NULL), 1);
    cvShowImage("Light Detector", redErode);
    cvWaitKey(0);

    cvReleaseImage(&greenLight);
    cvReleaseImage(&redLight);
    cvReleaseImage(&redScreened);
    cvReleaseImage(&redErode);
    cvReleaseImage(&redDilate);
    cvDestroyWindow("Light Detector");
    return 0;
}
void init_ctx(struct ctx *ctx)
{
    ctx->thr_image   = cvCreateImage(cvGetSize(ctx->image), 8, 1);
    ctx->temp_image1 = cvCreateImage(cvGetSize(ctx->image), 8, 1);
    ctx->temp_image3 = cvCreateImage(cvGetSize(ctx->image), 8, 3);
    ctx->kernel = cvCreateStructuringElementEx(9, 9, 4, 4, CV_SHAPE_RECT, NULL);
    ctx->contour_st = cvCreateMemStorage(0);
    ctx->hull_st    = cvCreateMemStorage(0);
    ctx->temp_st    = cvCreateMemStorage(0);
    ctx->fingers = (CvPoint *)calloc(NUM_FINGERS + 1, sizeof(CvPoint));
    ctx->defects = (CvPoint *)calloc(NUM_DEFECTS, sizeof(CvPoint));
}
// Copy a morphological structuring element
IplConvKernel *lhStructuringElementCopy(IplConvKernel *se)
{
    IplConvKernel *copy = cvCreateStructuringElementEx(se->nCols, se->nRows,
                                                       se->anchorX, se->anchorY, 0, NULL);
    copy->nShiftR = se->nShiftR;
    memcpy(copy->values, se->values, sizeof(int) * se->nRows * se->nCols);
    return copy;
}
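/*
 * lhMorpRankFilterB() below also relies on lhStructuringElementCard() and
 * lhStructuringElementMap(), which are not shown here. A plausible sketch of
 * the cardinality helper (an assumption: it counts the nonzero entries of the
 * structuring element):
 */
int lhStructuringElementCard(IplConvKernel *se)
{
    int i, card = 0;
    for (i = 0; i < se->nRows * se->nCols; i++)
        if (se->values[i] != 0)
            card++;
    return card;
}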
void test_time_opencv_erode(){
    //image = cvLoadImage("C:\\Users\\ninao\\Documents\\images\\dibco_test_images\\H04.bmp",0);
    display_image("image", image);
    IplImage * rimage = cvCloneImage(image);

    int values[] = {0,0,0,0,0,0,0,0,0};
    IplConvKernel * kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT,values);

    tester->start_timer();
    cvErode(image, rimage, kernel);
    tester->stop_timer();

    display_image("erode", rimage);
}
regionTracker::regionTracker(cameraImages *cam)
{
    cm = cam;
    initializeFlag = 0;
    result = cvCreateImage(cm->getImageSize(), IPL_DEPTH_8U, 1);
    contractedResult = cvCreateImage(cm->getImageSize(), IPL_DEPTH_8U, 1);
    element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
    intensity = cm->getIntensityImg();
    depth = cm->getDepthImg();
    human = new regionDetector(cm->getImageSize());
    fd = new faceDetector(cm->getImageSize());
}
/**
 * \brief Takes a frame and applies image-processing techniques to filter out non-laser-line points.
 *        Updates the images used for runtime display.
 */
int filterFrame()
{
    args[0] = frame;
    cvCvtColor(frame, frameHSV, CV_BGR2HSV);         // convert the RGB values of frame to HSV and place in frameHSV
    cvSplit(frameHSV, hue, saturation, value, NULL); // split frameHSV into its components; we are done with frameHSV
    args[1] = hue;
    args[2] = value;

    cvCopy(saturation, saturation2);                 // make an additional copy of saturation for display
    //args[8] = saturation2;
    //cvShowImage("saturation", saturation2);

    cvSmooth(frame, frameHSV, CV_BLUR, 20, 20);      // smooth frame and store in frameHSV
    //cvShowImage("Smoothed frame", frameHSV);

    cvSplit(frame, blue, green, red, NULL);          // split frame into its RGB components
    cvSplit(frameHSV, blue2, green2, red2, NULL);    // split the smoothed version into its RGB components

    cvMin(blue, green, min_bg);                      // take the min of blue and green and store in min_bg
    args[3] = min_bg;
    //cvShowImage("minimum of blue and green", min_bg);

    cvSub(red, min_bg, red_last);                    // red less the min of blue and green
    //cvShowImage("red_last = red - min_bg", red_last);
    cvThreshold(red_last, red_last, thresholdValue, 255, CV_THRESH_BINARY_INV); // threshold red_last
    //cvShowImage("threshold of red_last", red_last);
    args[4] = red_last;

    cvSub(red, red2, deltaRed);
    //cvShowImage("deltaRed = Original red - smooth red", deltaRed);
    cvThreshold(deltaRed, deltaRed, thresholdValue, 255, CV_THRESH_BINARY);
    //cvShowImage("threshold(deltaRed)", deltaRed);
    cvCopy(deltaRed, alpha);

    cvInRangeS(saturation, cvScalar(0), cvScalar(25), saturation);
    //cvShowImage("Low saturation in original frame", saturation);
    cvInRangeS(hue, cvScalar(49), cvScalar(125), beta);
    //cvShowImage("Mixed hue in original frame", beta);
    cvOr(beta, saturation, beta);
    //cvShowImage("beta = Low saturation OR mixed hue", beta);
    cvOr(beta, red_last, beta);
    //cvShowImage("beta = beta OR red_last", beta);
    //args[5] = alpha;
    args[5] = beta;

    IplConvKernel *mask = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);
    cvDilate(saturation2, dialated, mask, 20);
    //cvShowImage("dilate original saturation", dialated);
    args[6] = dialated;
    cvThreshold(dialated, dialated, 100, 255, CV_THRESH_BINARY);
    cvErode(dialated, eroded, mask, 30);
    args[7] = eroded;
    cvReleaseStructuringElement(&mask);

    cvSub(alpha, beta, orig_filter);
    args[8] = orig_filter;
    cvAnd(orig_filter, eroded, zeta);
    args[9] = zeta;

    return 0;
}
/*
 * Transform the image into a two-coloured image: one colour for the colour we want to track,
 * another for all other colours.
 * From this image we extract two pieces of data: the number of pixels detected,
 * and the centre of gravity of those pixels.
 */
CvPoint ColourToTrack::binarise(IplImage *image)
{
    int x, y;
    IplImage *hsv;
    IplConvKernel *kernel;
    int sommeX = 0, sommeY = 0;
    nbPixels = 0;

    if (mask == NULL)
        mask = cvCreateImage(cvGetSize(image), image->depth, 1);

    // Create the hsv image
    hsv = cvCloneImage(image);
    //cvCvtColor(image, hsv, CV_BGR2HSV);
    cvCvtColor(image, hsv, CV_BGR2Lab);

    // Create the mask
    //cvInRangeS(hsv, cvScalar(colour.hsv.h - TOLERANCE - 1, colour.hsv.s - TOLERANCE, 0), cvScalar(colour.hsv.h + TOLERANCE - 1, colour.hsv.s + TOLERANCE, 255), mask);
    cvInRangeS(hsv, cvScalar(0, colour.hsv.s - TOLERANCE - 1, colour.hsv.v - TOLERANCE),
               cvScalar(255, colour.hsv.s + TOLERANCE - 1, colour.hsv.v + TOLERANCE), mask);

    // Create the kernel for the morphological operations
    kernel = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);

    // Morphological closing (inverted because we have white pixels on a black background)
    cvDilate(mask, mask, kernel, 1);
    cvErode(mask, mask, kernel, 1);

    // Go through the mask to look for the tracked object and compute its centre of gravity
    for (x = 0; x < mask->width; x++) {
        for (y = 0; y < mask->height; y++) {
            // If it's a tracked pixel, add it to the centre-of-gravity calculation
            if (((uchar *)(mask->imageData + y * mask->widthStep))[x] == 255) {
                sommeX += x;
                sommeY += y;
                nbPixels++;
            }
        }
    }

    // Release the kernel
    cvReleaseStructuringElement(&kernel);
    // Release the hsv image
    cvReleaseImage(&hsv);

    // If there is no pixel, return a centre outside the image; otherwise return the centre of gravity
    if (nbPixels > 0)
        return cvPoint((int)(sommeX / nbPixels), (int)(sommeY / nbPixels));
    else
        return cvPoint(-1, -1);
}
void CTransformImage::Morphology()
{
    if (!m_transImage)
        return;

    IplConvKernel* element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);

    cvDilate(m_transImage, m_transImage, element, 1);
    cvDilate(m_transImage, m_transImage, element, 1);
    cvErode (m_transImage, m_transImage, element, 1);
    cvErode (m_transImage, m_transImage, element, 1);

    cvReleaseStructuringElement(&element);
}
static int parse_iplconvkernel(IplConvKernel **kernel, char *buf, void *log_ctx)
{
    char shape_filename[128] = "", shape_str[32] = "rect";
    int cols = 0, rows = 0, anchor_x = 0, anchor_y = 0, shape = CV_SHAPE_RECT;
    int *values = NULL, ret = 0;

    sscanf(buf, "%dx%d+%dx%d/%32[^=]=%127s",
           &cols, &rows, &anchor_x, &anchor_y, shape_str, shape_filename);

    if      (!strcmp(shape_str, "rect"   )) shape = CV_SHAPE_RECT;
    else if (!strcmp(shape_str, "cross"  )) shape = CV_SHAPE_CROSS;
    else if (!strcmp(shape_str, "ellipse")) shape = CV_SHAPE_ELLIPSE;
    else if (!strcmp(shape_str, "custom" )) {
        shape = CV_SHAPE_CUSTOM;
        if ((ret = read_shape_from_file(&cols, &rows, &values, shape_filename, log_ctx)) < 0)
            return ret;
    } else {
        av_log(log_ctx, AV_LOG_ERROR,
               "Shape unspecified or type '%s' unknown.\n", shape_str);
        ret = AVERROR(EINVAL);
        goto out;
    }

    if (rows <= 0 || cols <= 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Invalid non-positive values for shape size %dx%d\n", cols, rows);
        ret = AVERROR(EINVAL);
        goto out;
    }

    if (anchor_x < 0 || anchor_y < 0 || anchor_x >= cols || anchor_y >= rows) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Shape anchor %dx%d is not inside the rectangle with size %dx%d.\n",
               anchor_x, anchor_y, cols, rows);
        ret = AVERROR(EINVAL);
        goto out;
    }

    *kernel = cvCreateStructuringElementEx(cols, rows, anchor_x, anchor_y, shape, values);
    if (!*kernel) {
        ret = AVERROR(ENOMEM);
        goto out;
    }

    av_log(log_ctx, AV_LOG_VERBOSE, "Structuring element: w:%d h:%d x:%d y:%d shape:%s\n",
           cols, rows, anchor_x, anchor_y, shape_str);
out:
    av_freep(&values);
    return ret;
}
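/*
 * Usage sketch: the sscanf pattern above implies a specification string of the
 * form "<cols>x<rows>+<anchor_x>x<anchor_y>/<shape>[=<file>]". The call below
 * (buffer contents and NULL log context) is an illustrative assumption.
 */
IplConvKernel *kernel = NULL;
char spec[] = "5x5+2x2/ellipse";              /* 5x5 ellipse anchored at (2,2) */
/* char spec[] = "3x3+1x1/custom=shape.txt";     custom element read from a file */
if (parse_iplconvkernel(&kernel, spec, NULL) >= 0)
    cvReleaseStructuringElement(&kernel);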
// Morphological rank filter (binary; default SE is a 3x3 rectangle)
void lhMorpRankFilterB(const IplImage *src, IplImage *dst, IplConvKernel *se = NULL, unsigned int rank = 0)
{
    assert(src != NULL && dst != NULL && src != dst);

    bool defaultse = false;
    int card;

    if (se == NULL) {
        card = 3 * 3;
        assert(rank >= 0 && rank <= card);
        se = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
        defaultse = true;
    } else {
        card = lhStructuringElementCard(se);
        assert(rank >= 0 && rank <= card);
    }

    // default rank is the median
    if (rank == 0)
        rank = card / 2 + 1;

    IplConvKernel *semap = lhStructuringElementMap(se);
    CvMat *semat = cvCreateMat(semap->nRows, semap->nCols, CV_32FC1);

    int i;
    for (i = 0; i < semap->nRows * semap->nCols; i++) {
        semat->data.fl[i] = semap->values[i];
    }

    cvThreshold(src, dst, 0, 1, CV_THRESH_BINARY);
    IplImage *temp = cvCreateImage(cvGetSize(dst), 8, 1);
    cvFilter2D(dst, temp, semat, cvPoint(semap->anchorX, semap->anchorY));
    cvThreshold(temp, dst, card - rank, 255, CV_THRESH_BINARY);

    cvReleaseMat(&semat);
    cvReleaseStructuringElement(&semap);
    if (defaultse)
        cvReleaseStructuringElement(&se);
    cvReleaseImage(&temp);
}
int main(int argc, char *argv[])
{
    int c;
    IplImage *color_img, *hsv_img, *h_img;
    int flags = CV_WINDOW_AUTOSIZE;
    CvCapture *cv_cap = cvCaptureFromCAM(CAMERA_0); // capture from CAMERA 0
    int h = 180;
    CvScalar min = CV_RGB(h - 15, 100, 0);
    CvScalar max = CV_RGB(h + 15, 256, 256);

    /* Create ellipse to despeckle hsv. */
    IplConvKernel *ellipse = cvCreateStructuringElementEx(6, 6, 1, 1, CV_SHAPE_ELLIPSE, NULL);

    if (!cv_cap)
        goto fail;

    cvNamedWindow("Webcam Video", flags); // create window
    cvNamedWindow("hsv Video", flags);    // create window
    cvCreateTrackbar("Hue", "hsv Video", &h, 256, set_h);

    for (;;) {
        color_img = cvQueryFrame(cv_cap); // get frame
        if (color_img != 0) {
            hsv_img = cvCreateImage(cvGetSize(color_img), IPL_DEPTH_8U, 3);
            cvCvtColor(color_img, hsv_img, CV_BGR2HSV);
            h_img = cvCreateImage(cvGetSize(hsv_img), IPL_DEPTH_8U, 1);
            min = CV_RGB(h - 20, 10, 10);
            max = CV_RGB(h + 20, 256, 256);
            /* Remove anything not in the hue range. */
            cvInRangeS(hsv_img, min, max, h_img);
            /* Remove noise, or at least make the blotches bigger. */
            cvErode(h_img, h_img, ellipse, 1);
            cvDilate(h_img, h_img, ellipse, 1);
            cvShowImage("hsv Video", h_img);        // show frame
            cvShowImage("Webcam Video", color_img); // show frame
            cvReleaseImage(&hsv_img);
            cvReleaseImage(&h_img);
        }
        c = cvWaitKey(KS_WAIT); // wait KS_WAIT ms or for a key stroke
        if (c == 27)
            break; // if ESC, break and quit
    }

    /* clean up */
    cvReleaseCapture(&cv_cap);
    cvDestroyWindow("Webcam Video");
    return 0;

fail:
    printf("capture from cam failed\n");
    return 1;
}
void test_normalization_min_max(){
    float max_value;
    float min_value;

    IplImage * max_image = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    IplImage * min_image = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);

    IplConvKernel * kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_RECT,NULL);
    cvErode(image, min_image, kernel);   // local minimum
    cvDilate(image, max_image, kernel);  // local maximum
    display_image("erode", min_image);
    display_image("dilate", max_image);

    int kernel_radius = 1;
    test_get_normalization_parameters(image, min_image, max_image, kernel_radius, max_value, min_value);
    printf("max: %f, min: %f", max_value, min_value);
}
void moFlatlandColorPairFinderModule::imagePreprocess(IplImage *src){
    IplConvKernel *element = 0; // pointer to the morphological structuring element

    cvSmooth(src, src, CV_MEDIAN, 5, 0, 0, 0); // median filter to eliminate small noise

    // Then dilate the image (the erode step is currently disabled).
    IplImage *tmp = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, 0); // erode/dilate element
    //cvErode(src, tmp, element, 1); // erode the image
    cvCopy(src, tmp);
    cvDilate(tmp, src, element, 5); // dilate the image

    cvReleaseStructuringElement(&element);
    cvReleaseImage(&tmp);
}
int main(int argc, char** argv)
{
    cvNamedWindow("image");
    IplImage * src  = cvLoadImage("../cvtest/7newsample.jpg", 0);
    IplImage * temp = cvCreateImage(cvGetSize(src), 8, 1);
    IplImage * img  = cvCreateImage(cvGetSize(src), 8, 1);
    cvCopyImage(src, temp);
    cvCopyImage(src, img);
    cvSmooth(img, img);

    IplConvKernel *element = 0; // pointer to the morphological structuring element
    element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_ELLIPSE, 0); // 3, 5, 7
    cvErode(src, src, element);

    // morphological gradient (temp is used as the required scratch image;
    // passing NULL as the element would use the default 3x3 kernel)
    cvMorphologyEx(src, img, temp, element, CV_MOP_GRADIENT, 1);
    cvShowImage("image", img);
    cvWaitKey(0);

    IplImage* image = img; //= cvLoadImage("../cvtest/7newsample.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* src2  = img; //= cvLoadImage("../cvtest/7newsample.jpg"); // changed for a prettier show in colour
    CvMemStorage* storage = cvCreateMemStorage(0);
    cvSmooth(image, image, CV_GAUSSIAN, 5, 5);

    CvSeq* results = cvHoughCircles(image, storage, CV_HOUGH_GRADIENT, 4, image->width / 10);
    for (int i = 0; i < results->total; i++) {
        float* p = (float*)cvGetSeqElem(results, i);
        CvPoint pt = cvPoint(cvRound(p[0]), cvRound(p[1]));
        cvCircle(src2, pt, cvRound(p[2]), CV_RGB(0xff, 0, 0));
    }
    cvNamedWindow("cvHoughCircles", 1);
    cvShowImage("cvHoughCircles", src2);
    cvWaitKey(0);
    return 0;
}