static void handle_dithering(int ditherMode, image *aPic, int outType, int numBits) { static int mapping[] = { 0, 1, 2, 3, 73, 79, 86, 88, 90, 93, 107, 119, 121, 123, 125, 136, 137, 138, 139, 141, 152, 153, 153, 154, 155, 155, 156, 157, 158, 159, 162, 165, 166, 167, 168, 169, 169, 170, 171, 172, 173, 174, 176, 179, 180, 181, 182, 182, 183, 183, 184, 184, 185, 185, 186, 186, 187, 187, 188, 189, 189, 190, 194, 196, 197, 197, 198, 198, 199, 199, 200, 200, 200, 201, 201, 201, 202, 202, 202, 203, 203, 203, 204, 204, 204, 205, 205, 206, 206, 207, 207, 208, 209, 209, 210, 211, 211, 212, 212, 213, 213, 213, 214, 214, 214, 215, 215, 215, 216, 216, 216, 217, 217, 217, 217, 218, 218, 218, 218, 219, 219, 219, 219, 220, 220, 220, 221, 221, 221, 221, 222, 222, 222, 223, 223, 223, 224, 224, 224, 225, 225, 225, 226, 226, 226, 227, 227, 227, 227, 228, 228, 228, 229, 229, 229, 229, 229, 230, 230, 230, 230, 231, 231, 231, 231, 231, 232, 232, 232, 232, 232, 233, 233, 233, 233, 233, 233, 234, 234, 234, 234, 234, 235, 235, 235, 235, 235, 235, 236, 236, 236, 236, 236, 237, 237, 237, 237, 237, 238, 238, 238, 238, 238, 239, 239, 239, 239, 239, 240, 240, 240, 240, 241, 241, 241, 241, 242, 242, 242, 242, 243, 243, 243, 244, 244, 244, 244, 245, 245, 245, 245, 246, 246, 246, 247, 247, 247, 248, 248, 248, 249, 249, 249, 250, 250, 250, 251, 251, 251, 252, 252, 253, 253, 254, 255, 255, }; switch(ditherMode) { case 'H': case 'h': if (outType) halftoneImage(aPic, mapping, numBits); else halftoneImage(aPic, NULL, numBits); break; case 'T': case 't': if (outType) thresholdImage(aPic, mapping, numBits); else thresholdImage(aPic, NULL, numBits); break; case 'D': case 'd': if (outType) ditherImage(aPic, mapping, numBits); else ditherImage(aPic, NULL, numBits); break; default: break; } return; }
/*
 * Convert an input digit image into the flattened, fixed-size binary sample
 * expected by the recognizer:
 *   1. grayscale conversion (only when the input is 3-channel)
 *   2. inverse adaptive threshold (digit becomes foreground/255)
 *   3. isolated-pixel (salt-noise) removal
 *   4. crop to the bounding box of the contour with the most points
 *   5. resize to sizex x sizey and reshape to a sizeimage-row column layout
 *
 * inImage  - source image (1- or 3-channel); not modified.
 * outImage - receives the flattened, transposed sample.
 */
void DigitRecognizer::preProcessImage(const cv::Mat& inImage, cv::Mat& outImage) {
    cv::Mat grayImage, blurredImage, thresholdImage, contourImage, regionOfInterest;
    std::vector<std::vector<cv::Point> > contours;

    if (inImage.channels() == 3) {
        cv::cvtColor(inImage, grayImage, CV_BGR2GRAY);
    } else {
        inImage.copyTo(grayImage);
    }

    blurredImage = grayImage; // blur currently disabled; kept for reference
    //cv::GaussianBlur(grayImage, blurredImage, cv::Size(3, 3), 2, 2);

    cv::adaptiveThreshold(blurredImage, thresholdImage, 255,
                          cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 3, 0);

    // Remove isolated foreground pixels: a set pixel with no equal-valued
    // in-bounds 8-neighbour is treated as noise and cleared.
    int rows = thresholdImage.rows;
    int cols = thresholdImage.cols;
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            uchar p = thresholdImage.at<uchar>(r, c);
            if (p == 0) continue;
            bool allZeros = true;
            for (int i = -1; i <= 1 && allZeros; i++) {
                for (int j = -1; j <= 1; j++) {
                    if (i == 0 && j == 0) continue;
                    if ((r + i > -1) && (r + i < rows) && (c + j > -1) && (c + j < cols) &&
                        (thresholdImage.at<uchar>(r + i, c + j) == p)) {
                        allZeros = false;
                        break;
                    }
                }
            }
            if (allZeros) {
                thresholdImage.at<uchar>(r, c) = 0;
            }
        }
    }

    thresholdImage.copyTo(contourImage);
    cv::findContours(contourImage, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

    // Crop to the contour with the most points (proxy for the digit outline).
    // BUG FIX: previously contours[0] was indexed even when findContours
    // returned nothing, which is undefined behaviour; fall back to the whole
    // thresholded image when no contour is found.
    if (contours.empty()) {
        regionOfInterest = thresholdImage;
    } else {
        size_t idx = 0;
        size_t area = 0;
        for (size_t i = 0; i < contours.size(); i++) {
            if (area < contours[i].size()) {
                idx = i;
                area = contours[i].size();
            }
        }
        regionOfInterest = thresholdImage(cv::boundingRect(contours[idx]));
    }

    cv::resize(regionOfInterest, outImage, cv::Size(sizex, sizey));
    outImage = outImage.reshape(0, sizeimage).t();
}
/*
 * Locate the centre of mass of the tracked colour in `target`.
 * The image is thresholded, cleaned with a morphological opening
 * (erode then dilate) and its centroid computed from image moments.
 * Returns (-1, -1) when no pixels survive the cleanup.
 */
cv::Point ColorTracker::getCoM(cv::Mat target) {
    cv::Mat binary = thresholdImage(target);

    // Elliptical kernels sized from the configured radii.
    int er = mParameters.erosionRadius;
    int dr = mParameters.dilationRadius;
    cv::Mat erodeKernel = cv::getStructuringElement(
        cv::MORPH_ELLIPSE, cv::Size(2 * er + 1, 2 * er + 1), cv::Point(er, er));
    cv::Mat dilateKernel = cv::getStructuringElement(
        cv::MORPH_ELLIPSE, cv::Size(2 * dr + 1, 2 * dr + 1), cv::Point(dr, dr));

    // Morphological opening removes small speckles.
    cv::erode(binary, binary, erodeKernel);
    cv::dilate(binary, binary, dilateKernel);

    // Optional debug view of the cleaned mask.
    if (mParameters.displayThresholdedImage) {
        cv::imshow("thresh", binary);
    }

    // Treat the mask as binary (second arg = 1); m00 is then the pixel
    // count and m10/m00, m01/m00 the centroid.
    cv::Moments mom = cv::moments(binary, 1);
    double area = mom.m00;
    if (area == 0) {
        // Nothing detected: report an invalid position.
        return cv::Point(-1, -1);
    }

    int comX = (int)(mom.m10 / area);
    int comY = (int)(mom.m01 / area);
    binary.release();
    return cv::Point(comX, comY);
}
void PreProcessImage(const Mat& inImage, Mat& outImage, int sizex, int sizey) { Mat grayImage, blurredImage, thresholdImage, contourImage, regionOfInterest; vector<vector<Point> > contours; try{ cvtColor(inImage, grayImage, COLOR_BGR2GRAY); } catch (exception& ex){ grayImage = inImage.clone(); } GaussianBlur(grayImage, blurredImage, Size(5, 5), 2, 2); adaptiveThreshold(blurredImage, thresholdImage, 255, 1, 1, 11, 2); thresholdImage.copyTo(contourImage); findContours(contourImage, contours, RETR_LIST, CHAIN_APPROX_SIMPLE); int idx = 0; size_t area = 0; for (size_t i = 0; i < contours.size(); i++) { if (area < contours[i].size()) { idx = i; area = contours[i].size(); } } Rect rec = boundingRect(contours[idx]); regionOfInterest = thresholdImage(rec); resize(regionOfInterest, outImage, Size(sizex, sizey)); }
/*
 * Detect the laser stripe by differencing the laser-on and laser-off frames,
 * thresholding the difference, grouping each row's over-threshold pixels into
 * ranges, and marking the centre of the best range per row in the returned
 * single-channel (CV_8U) image.  Intermediate stages are pushed to the
 * preview window for debugging.
 */
Mat ScanProc::DetectLaser2(Mat &laserOn, Mat &laserOff) {
    // Tunable parameters.
    int laserMagnitudeThreshold = 20;          // default: 10
    int maxLaserWidth = 40, minLaserWidth = 3; // default: 40, 3
    int rangeDistanceThreshold = 5;            // default: 5 — merge ranges closer than this

    int rows = laserOff.rows;
    int cols = laserOff.cols;
    Mat grayLaserOn(rows, cols, CV_8U, Scalar(0));
    Mat grayLaserOff(rows, cols, CV_8U, Scalar(0));
    Mat diffImage(rows, cols, CV_8U, Scalar(0));
    Mat thresholdImage(rows, cols, CV_8U, Scalar(0));
    Mat rangeImage(rows, cols, CV_8U, Scalar(0));
    Mat result(rows, cols, CV_8U, Scalar(0));

    // Grayscale both frames and keep only the added laser light
    // (CV_8U subtract saturates at 0).
    cvtColor(laserOff, grayLaserOff, CV_BGR2GRAY);
    cvtColor(laserOn, grayLaserOn, CV_BGR2GRAY);
    subtract(grayLaserOn, grayLaserOff, diffImage);

    const int width = grayLaserOff.cols;
    const int height = grayLaserOff.rows;

    int numLocations = 0;           // rows in which a laser centre was found
    int numMerged = 0;              // statistics: ranges merged with a close predecessor
    int numPixelsOverThreshold = 0; // statistics: pixels above the magnitude threshold

    int maxNumLocations = height;   // at most one detection per row
    int firstRowLaserCol = width / 2;
    int prevLaserCol = firstRowLaserCol;

    LaserRange *laserRanges = new LaserRange[width + 1];

    for (int iRow = 0; iRow < height && numLocations < maxNumLocations; iRow++) {
        // Candidate ranges for this row; the entry being built is sentinel -1.
        int numLaserRanges = 0;
        laserRanges[numLaserRanges].startCol = -1;
        laserRanges[numLaserRanges].endCol = -1;

        for (int iCol = 0; iCol < width; iCol++) {
            int mag = diffImage.at<uchar>(iRow, iCol);
            if (mag > laserMagnitudeThreshold) {
                thresholdImage.at<uchar>(iRow, iCol) = 255;
                numPixelsOverThreshold++;
                // Start of a run of laser pixels.
                if (laserRanges[numLaserRanges].startCol == -1) {
                    laserRanges[numLaserRanges].startCol = iCol;
                }
            } else if (laserRanges[numLaserRanges].startCol != -1) {
                // End of a run: keep it only if the width is plausible.
                int laserWidth = iCol - laserRanges[numLaserRanges].startCol;
                if (laserWidth <= maxLaserWidth && laserWidth >= minLaserWidth) {
                    // If this range is very close to the previous one, merge
                    // them instead of creating a new one.
                    // BUG FIX: the gap was computed in an `unsigned`, so a
                    // negative (overlapping) gap wrapped to a huge value and
                    // the merge never fired; use signed arithmetic.
                    bool wasMerged = false;
                    if (numLaserRanges > 0) {
                        int rangeDistance = laserRanges[numLaserRanges].startCol -
                                            laserRanges[numLaserRanges - 1].endCol;
                        if (rangeDistance < rangeDistanceThreshold) {
                            laserRanges[numLaserRanges - 1].endCol = iCol;
                            laserRanges[numLaserRanges - 1].centerCol =
                                round((laserRanges[numLaserRanges - 1].startCol +
                                       laserRanges[numLaserRanges - 1].endCol) / 2);
                            wasMerged = true;
                            numMerged++;
                        }
                    }
                    if (!wasMerged) {
                        // Record this range as a candidate.
                        laserRanges[numLaserRanges].endCol = iCol;
                        laserRanges[numLaserRanges].centerCol =
                            round((laserRanges[numLaserRanges].startCol +
                                   laserRanges[numLaserRanges].endCol) / 2);
                        numLaserRanges++;
                    }
                    // Reinitialize the in-progress entry.
                    laserRanges[numLaserRanges].startCol = -1;
                    laserRanges[numLaserRanges].endCol = -1;
                } else {
                    // Implausible width: false positive, discard the run.
                    laserRanges[numLaserRanges].startCol = -1;
                }
            }
        } // foreach column

        if (numLaserRanges > 0) {
            // Visualize every candidate range of this row.
            for (int i = 0; i < numLaserRanges; i++) {
                for (int j = laserRanges[i].startCol; j < laserRanges[i].endCol; j++) {
                    rangeImage.at<uchar>(iRow, j) = 255;
                }
            }
            // Pick the range nearest the previous row's choice and mark its
            // refined centre in the result.
            int rangeChoice = detectBestLaserRange(laserRanges, numLaserRanges, prevLaserCol);
            prevLaserCol = laserRanges[rangeChoice].centerCol;
            int centerCol = detectLaserRangeCenter(iRow, laserRanges[rangeChoice], diffImage);
            result.at<uchar>(iRow, centerCol) = 255;

            // Remember where the first detected row started.
            if (numLocations == 0) {
                firstRowLaserCol = laserRanges[rangeChoice].startCol;
            }
            numLocations++;
        }
    } // foreach row

    // BUG FIX: laserRanges was allocated with new[] and never freed,
    // leaking (width + 1) * sizeof(LaserRange) bytes on every call.
    delete[] laserRanges;

    pPreviewWnd->updateFrame(grayLaserOff, "laser off");
    msleep(LASER_LINE_DETECTION_PREVIEW_DELAY);
    pPreviewWnd->updateFrame(grayLaserOn, "laser on");
    msleep(LASER_LINE_DETECTION_PREVIEW_DELAY);
    pPreviewWnd->updateFrame(diffImage, "diff");
    msleep(LASER_LINE_DETECTION_PREVIEW_DELAY);
    pPreviewWnd->updateFrame(thresholdImage, "threshold");
    msleep(LASER_LINE_DETECTION_PREVIEW_DELAY);
    pPreviewWnd->updateFrame(rangeImage, "laser range");
    msleep(LASER_LINE_DETECTION_PREVIEW_DELAY);
    pPreviewWnd->updateFrame(result, "result");
    msleep(LASER_LINE_DETECTION_PREVIEW_DELAY);

    return result;
}
/*
 * Per-frame tracking callback: threshold the incoming image, pick the contour
 * most likely to be the target, and publish (a) a debug drawing of the
 * candidate contours and (b) a smoothed target point in percent-of-image
 * coordinates, where z encodes relative distance and -1 means "no target".
 */
void imageCb(const sensor_msgs::ImageConstPtr& msg) {
    //cv_bridge::CvImagePtr cvPtr = getThreshImageColor( msg );
    //cv_bridge::CvImagePtr cvPtr = getThreshImageInfrared( msg );
    cv_bridge::CvImagePtr cvPtr = getImage( msg );
    equalizeHistogram( cvPtr );

    // Work on a thresholded copy so the original frame stays intact.
    cv_bridge::CvImagePtr threshImagePtr(
        new cv_bridge::CvImage( cvPtr->header, cvPtr->encoding, cvPtr->image.clone() ) );
    thresholdImage( threshImagePtr, /* byColor = */ false );

    cv::vector< cv::vector<cv::Point> > contours;
    // applyMorphology( threshImagePtr );
    contours = getContours( threshImagePtr->image );

    cv::vector< double > areaBasedProbs = getAreaBasedProbs( contours );
    // cv::vector< double > momentMatchBasedProbs = getMomentMatchBasedProbs( contours );
    // cv::vector< double > surfMatchBasedProbs = getSurfMatchBasedProbs( contours, cvPtr );

    // Choose the contour with the highest probability.
    // BUG FIX: the running maximum was reset to 0 (`maxProb = 0;`) instead of
    // being updated, so the *last* contour with any positive probability
    // always won; update it with the current probability instead.
    double maxProb = 0;
    int chosenContour = -1;
    for( int i = 0; i < (int)contours.size(); i++ ) {
        double prob = areaBasedProbs[i]; // * momentMatchBasedProbs[i] * surfMatchBasedProbs[i];
        if( prob > maxProb ) {
            maxProb = prob;
            chosenContour = i;
        }
    }

    // Draw the non-chosen contours dimmed; the chosen one is drawn below.
    cv::Mat drawing = cv::Mat::zeros( cvPtr->image.size(), CV_8UC3 );
    for( int i = 0; i < (int)contours.size(); i++ ) {
        if( i == chosenContour ) continue;
        cv::Scalar color = cv::Scalar( 50, 150, 150 );
        cv::drawContours( drawing, contours, i, color, 2, 8 );
    }

    geometry_msgs::Point point;
    if( chosenContour > -1 ) {
        // Mass centre of the chosen contour.
        cv::Moments chosenMoments = cv::moments( contours[chosenContour], false );
        cv::Point foundCenter = cv::Point( chosenMoments.m10/chosenMoments.m00,
                                           chosenMoments.m01/chosenMoments.m00 );
        cv::drawContours( drawing, contours, chosenContour, cv::Scalar( 255, 255, 255 ), 2, 8 );
        cv::circle( drawing, foundCenter, 10, cv::Scalar( 255, 255, 255 ), -1, 8, 0 );

        // Position in percent of the image dimensions.
        point.x = foundCenter.x * 100.0 / cvPtr->image.size().width;
        point.y = foundCenter.y * 100.0 / cvPtr->image.size().height;

        // distance is the size of the contour relative to targetSize_;
        double contourArea = cv::contourArea( contours[chosenContour] );
        double imageArea = cvPtr->image.size().width * cvPtr->image.size().height;
        // 50 means we are at the target size, lower too close, higher too far
        // if contourArea/imageArea is very close to 1 something is wrong,
        // regard it as a miss
        if( fabs( contourArea/imageArea - 1.0 ) < .05 ) {
            point.x = point.y = 0.0;
            point.z = -1.0;
            missesInARow_++;
        } else {
            point.z = fmax( ( targetSize_ - contourArea/imageArea * 100.0 ) / 2.0 + 50.0, 0.0 );
            missesInARow_ = 0;
        }
    } else {
        point.x = point.y = 0.0;
        point.z = -1.0;
        missesInARow_++;
    }

    // After too many misses (or when nothing was published yet), snap to the
    // new point; otherwise low-pass filter toward it.
    if( missesInARow_ > 30 || pointPublished_.z == -1.0 ) {
        pointPublished_ = point;
        missesInARow_ = 0;
    } else if( point.z != -1.0 ) {
        pointPublished_.x = ( point.x + pointPublished_.x * dampingFactor_ ) / ( 1.0 + dampingFactor_ );
        pointPublished_.y = ( point.y + pointPublished_.y * dampingFactor_ ) / ( 1.0 + dampingFactor_ );
        pointPublished_.z = ( point.z + pointPublished_.z * dampingFactor_ ) / ( 1.0 + dampingFactor_ );
    }

    // Draw the published (smoothed) point in magenta.
    cv::Point drawPointPublished( pointPublished_.x * cvPtr->image.size().width / 100.0,
                                  pointPublished_.y * cvPtr->image.size().height / 100.0 );
    cv::circle( drawing, drawPointPublished, 5, cv::Scalar( 255, 0, 255 ), -1, 8, 0 );

    // Publish the debug drawing and the tracked point.
    cv_bridge::CvImage drawingImage( cvPtr->header, enc::BGR8, drawing );
    centersPub_.publish( drawingImage.toImageMsg() );
    pointPub_.publish( pointPublished_ );
}
/* Copy a user-supplied filename into a fixed buffer.
 * BUG FIX: strncpy() does not NUL-terminate when src fills the buffer;
 * terminate explicitly so later fopen()/graph calls never read past it. */
static void copy_filename(char *dst, const char *src) {
    strncpy(dst, src, FILENAME_LENGTH - 1);
    dst[FILENAME_LENGTH - 1] = '\0';
}

/*
 * Image-processing driver: read a portImage from a file or stdin, apply the
 * operations selected on the command line (in a fixed internal order, not in
 * command-line order), and write the result when any operation modified the
 * image.  Returns 0 on success, 1 on usage error; exits on file errors.
 */
int main(int argc, char **argv) {
    int commandArgs = 1;
    int i;
    char x;                       /* separator char consumed by "%d%c%d" scans */
    char performContrast = 0;     /* set to the scanned separator; non-zero enables -con */
    char fft_filename[FILENAME_LENGTH];
    char cdf_filename[FILENAME_LENGTH];
    char histo_filename[FILENAME_LENGTH];
    float contrastLow = 0.0;
    float contrastHigh = 0.0;
    float highpasslevel;
    float lowpasslevel;
    float percentCorrupt;
    float sigma;
    float brightness;
    float sat;
    int m;
    int replaceWithM;
    /* Operation flags; most double as the option's numeric argument. */
    int performHistogram = 0;
    int performCDF = 0;
    int performFFT = 0;
    int performVectorMedianFilter = 0;
    int performMedianFilter = 0;
    int performMeanFilter = 0;
    int performSpacialFilter = 0;
    int performLevelSlicing = 0;
    int performEqualize = 0;
    int performColorScale = 0;
    int performSpatialReduceWidth = 0;
    int performSpatialReduceHeigth = 0;
    int performHighpass = 0;
    int performLowpass = 0;
    int performComponentMedianFilter = 0;
    int performVectorOrderStatistic = 0;
    int performVectorSpacialOrderStatistic = 0;
    int performVectorMedianOrderStatistic = 0;
    int performMinkowskiAddition = 0;
    int performMinkowskiSubtraction = 0;
    int performMinkowskiOpening = 0;
    int performMinkowskiClosing = 0;
    int performEdgeDetectOne = 0;
    int performEdgeDetectTwo = 0;
    int performEdgeDetectThree = 0;
    int performEdgeDetectFour = 0;   /* NOTE(review): parsed (-e4 n) but never used below */
    int performAddGaussianNoise = 0;
    int performAddSaltPepperNoise = 0;
    int performSetBrightness = 0;
    int performSetSaturation = 0;
    int performBrightFilter = 0;
    int imageWriteNecessary = 0;
    int maskwidth = 0;
    int maskheight = 0;
    int maskrepeat = 0;
    FILE *in = stdin;
    FILE *out = stdout;
    struct portImage *pi;

    if (argc < 3) {
        printf("\n");
        printf(" Usage: %s [inputFile] ([outputFile]) [option] ([option] ...)\n", argv[0]);
        printf("\n");
        printf(" InputFile: Either a filename or '-' for stdin.\n");
        printf(" OutputFile: Either a filename or '-' for stdout. (Not needed if no output necessary.)\n");
        printf("\n");
        printf(" Options:\n");
        printf(" -ghisto FILENAME Graph Histogram\n");
        printf(" -gcdf FILENAME Graph Cumulative Distribution\n");
        printf(" -gfft FILENAME Graph FFT plot\n");
        printf(" -color n Reduce color scale to n\n");
        printf(" -spatial WIDTH-HEIGHT Perform spacial reduction to Width and Height\n");
        printf(" -level n Perform level slicing from graylevel n to graylevel n+10\n");
        printf(" -con LOW-HIGH Scale image contrast from LOW graylevel percentage to HIGH graylevel percentage\n");
        /* BUG FIX: corrected "Eqaulization" typo in help text. */
        printf(" -equ Histogram Equalization\n");
        printf(" -medianf n Simple Median Filter of window size n*n\n");
        printf(" -meanf n Simple Mean Filter of window size n*n\n");
        printf(" -cmf n Component Median Filter of window size n*n\n");
        printf(" -vmf n Vector Median Filter of window n*n\n");
        printf(" -sf Spacial Filter\n");
        printf(" -vos n v Vector Order Stat of window size n*n and value v\n");
        printf(" -vmos n m [01] Vector Median Order Stat of window size n*n, m threshold, and 0 or 1(True) replace with m\n");
        printf(" -vsos n m [01] Vector Spacial Order Stat of window size n*n, m threshold, and 0 or 1(True) replace with m\n");
        printf(" -brightf n Perform an Brightness filter using HSV colorspace on size n*n window.\n");
        printf(" -bright %% Set brightness to %% percent\n");
        printf(" -sat %% Set saturation to %% percent\n");
        printf(" -hp %% Highpass filter of %% percent\n");
        printf(" -lp %% Lowpass filter of %% percent\n");
        printf(" -ma NxM R Perform Minkowski Addition using NxM mask, repeated R times.\n");
        printf(" -ms NxM R Perform Minkowski Subtraction using NxM mask, repeated R times.\n");
        printf(" -mo NxM R Perform Minkowski Opening using NxM mask, repeated R times.\n");
        printf(" -mc NxM R Perform Minkowski Closing using NxM mask, repeated R times.\n");
        printf(" -e1 Perform Edge Detection using X/(X – B)\n");
        printf(" -e2 Perform Edge Detection using (X + B)/X\n");
        printf(" -e3 Perform Edge Detection using [(X+B)/(X-B)]-B\n");
        printf(" -e4 n Experimental Edge Detection on Color Images using n*n window.\n");
        printf(" -noiseG p s Add Gaussian noise to p (0 to 1 floating) percent of image with s sigma noise.\n");
        printf(" -noiseSP p Add Salt and Pepper noise to p (0 to 1 floating) percent of image.\n");
        printf("\n");
        return(1);
    }

    /* Input: a filename or '-' for stdin. */
    if (strcmp(argv[commandArgs], "-") != 0) {
        in = fopen(argv[1], "r");
        if (in == NULL) {
            fprintf(stderr, "File '%s' failed to open for reading.\n", argv[1]);
            exit(1);
        }
    }
    commandArgs++;

    /* Optional output: a filename, '-' for stdout, or absent (first option follows). */
    if (strcmp(argv[commandArgs], "-") != 0 && argv[commandArgs][0] != '-') {
        commandArgs++;
        out = fopen(argv[2], "w");
        if (out == NULL) {
            fprintf(stderr, "File '%s' failed to open for writing.\n", argv[2]);
            exit(1);
        }
    }

    /* Parse the remaining options; each handler consumes its own arguments. */
    for (; commandArgs < argc; commandArgs++) {
        if (strcmp(argv[commandArgs], "-color") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performColorScale); }
        if (strcmp(argv[commandArgs], "-spatial") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d%c%d", &performSpatialReduceWidth, &x, &performSpatialReduceHeigth); }
        if (strcmp(argv[commandArgs], "-level") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performLevelSlicing); }
        if (strcmp(argv[commandArgs], "-con") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%f%c%f", &contrastLow, &performContrast, &contrastHigh); }
        if (strcmp(argv[commandArgs], "-vos") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performVectorOrderStatistic); commandArgs++; sscanf(argv[commandArgs], "%d", &m); }
        if (strcmp(argv[commandArgs], "-vmf") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performVectorMedianFilter); }
        if (strcmp(argv[commandArgs], "-sf") == 0) { imageWriteNecessary = 1; performSpacialFilter = 1; }
        if (strcmp(argv[commandArgs], "-vmos") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performVectorMedianOrderStatistic); commandArgs++; sscanf(argv[commandArgs], "%d", &m); commandArgs++; sscanf(argv[commandArgs], "%d", &replaceWithM); }
        if (strcmp(argv[commandArgs], "-vsos") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performVectorSpacialOrderStatistic); commandArgs++; sscanf(argv[commandArgs], "%d", &m); commandArgs++; sscanf(argv[commandArgs], "%d", &replaceWithM); }
        if (strcmp(argv[commandArgs], "-cmf") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performComponentMedianFilter); }
        if (strcmp(argv[commandArgs], "-medianf") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performMedianFilter); }
        if (strcmp(argv[commandArgs], "-meanf") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performMeanFilter); }
        if (strcmp(argv[commandArgs], "-equ") == 0) { imageWriteNecessary = 1; performEqualize = 1; }
        if (strcmp(argv[commandArgs], "-hp") == 0) { imageWriteNecessary = 1; commandArgs++; performHighpass = 1; sscanf(argv[commandArgs], "%f", &highpasslevel); }
        if (strcmp(argv[commandArgs], "-lp") == 0) { imageWriteNecessary = 1; commandArgs++; performLowpass = 1; sscanf(argv[commandArgs], "%f", &lowpasslevel); }
        if (strcmp(argv[commandArgs], "-brightf") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performBrightFilter); }
        if (strcmp(argv[commandArgs], "-bright") == 0) { imageWriteNecessary = 1; commandArgs++; performSetBrightness = 1; sscanf(argv[commandArgs], "%f", &brightness); }
        if (strcmp(argv[commandArgs], "-sat") == 0) { imageWriteNecessary = 1; commandArgs++; performSetSaturation = 1; sscanf(argv[commandArgs], "%f", &sat); }
        if (strcmp(argv[commandArgs], "-ghisto") == 0) { commandArgs++; performHistogram = 1; copy_filename(histo_filename, argv[commandArgs]); }
        if (strcmp(argv[commandArgs], "-gcdf") == 0) { commandArgs++; performCDF = 1; copy_filename(cdf_filename, argv[commandArgs]); }
        if (strcmp(argv[commandArgs], "-gfft") == 0) { commandArgs++; performFFT = 1; copy_filename(fft_filename, argv[commandArgs]); }
        if (strcmp(argv[commandArgs], "-ma") == 0) { imageWriteNecessary = 1; performMinkowskiAddition = 1; commandArgs++; sscanf(argv[commandArgs], "%d%c%d", &maskwidth, &x, &maskheight); commandArgs++; sscanf(argv[commandArgs], "%d", &maskrepeat); }
        if (strcmp(argv[commandArgs], "-ms") == 0) { imageWriteNecessary = 1; performMinkowskiSubtraction = 1; commandArgs++; sscanf(argv[commandArgs], "%d%c%d", &maskwidth, &x, &maskheight); commandArgs++; sscanf(argv[commandArgs], "%d", &maskrepeat); }
        if (strcmp(argv[commandArgs], "-mo") == 0) { imageWriteNecessary = 1; performMinkowskiOpening = 1; commandArgs++; sscanf(argv[commandArgs], "%d%c%d", &maskwidth, &x, &maskheight); commandArgs++; sscanf(argv[commandArgs], "%d", &maskrepeat); }
        if (strcmp(argv[commandArgs], "-mc") == 0) { imageWriteNecessary = 1; performMinkowskiClosing = 1; commandArgs++; sscanf(argv[commandArgs], "%d%c%d", &maskwidth, &x, &maskheight); commandArgs++; sscanf(argv[commandArgs], "%d", &maskrepeat); }
        if (strcmp(argv[commandArgs], "-e1") == 0) { imageWriteNecessary = 1; performEdgeDetectOne = 1; }
        if (strcmp(argv[commandArgs], "-e2") == 0) { imageWriteNecessary = 1; performEdgeDetectTwo = 1; }
        if (strcmp(argv[commandArgs], "-e3") == 0) { imageWriteNecessary = 1; performEdgeDetectThree = 1; }
        if (strcmp(argv[commandArgs], "-e4") == 0) { imageWriteNecessary = 1; commandArgs++; sscanf(argv[commandArgs], "%d", &performEdgeDetectFour); }
        if (strcmp(argv[commandArgs], "-noiseG") == 0) { imageWriteNecessary = 1; performAddGaussianNoise = 1; commandArgs++; sscanf(argv[commandArgs], "%f", &percentCorrupt); commandArgs++; sscanf(argv[commandArgs], "%f", &sigma); }
        if (strcmp(argv[commandArgs], "-noiseSP") == 0) { imageWriteNecessary = 1; performAddSaltPepperNoise = 1; commandArgs++; sscanf(argv[commandArgs], "%f", &percentCorrupt); }
    }

    pi = readImage(in);

    /* Frequency-domain operations share one FFT round trip. */
    if (performHighpass || performLowpass || performFFT) {
        FFT2D(pi);
        if (performHighpass) highpass(pi, highpasslevel);
        if (performLowpass) lowpass(pi, lowpasslevel);
        if (performFFT) graph_fftlogplot(pi, fft_filename);
        IFFT2D(pi);
    }

    /* Morphological and edge operations work on a thresholded image. */
    if (performEdgeDetectOne || performEdgeDetectTwo || performEdgeDetectThree ||
        performMinkowskiAddition || performMinkowskiSubtraction ||
        performMinkowskiOpening || performMinkowskiClosing)
        thresholdImage(pi);

    if (performAddGaussianNoise) addGaussianNoise(pi, percentCorrupt, sigma);
    if (performAddSaltPepperNoise) addSaltPepperNoise(pi, percentCorrupt);
    if (performMedianFilter) simpleMedianFilter(pi, performMedianFilter);
    if (performMeanFilter) simpleMeanFilter(pi, performMeanFilter);
    if (performComponentMedianFilter) componentMedianFilter(pi, performComponentMedianFilter);
    if (performVectorOrderStatistic) vectorOrderStatistic(pi, performVectorOrderStatistic, m);
    if (performVectorSpacialOrderStatistic) vectorSpacialOrderStatistic(pi, performVectorSpacialOrderStatistic, m, replaceWithM);
    if (performVectorMedianOrderStatistic) vectorMedianOrderStatistic(pi, performVectorMedianOrderStatistic, m, replaceWithM);
    if (performVectorMedianFilter) vectorMedianFilter(pi, performVectorMedianFilter);
    if (performSpacialFilter) spacialFilter(pi);
    if (performBrightFilter) HSV_ValueFilter(pi, performBrightFilter);
    if (performColorScale) scale_reduce(pi, performColorScale);
    if (performSetBrightness) setBrightness(pi, brightness);
    if (performSetSaturation) setSaturation(pi, sat);
    if (performSpatialReduceWidth) spacial_reduce(pi, performSpatialReduceWidth, performSpatialReduceHeigth);
    if (performContrast) contrast_stretching(pi, contrastLow, contrastHigh);
    if (performLevelSlicing) level_slice(pi, performLevelSlicing);
    if (performEqualize) equalize(pi);
    if (performHistogram) graph_histogram(pi, histo_filename);
    if (performCDF) graph_cdf(pi, cdf_filename);

    if (performMinkowskiAddition) for (i = 0; i < maskrepeat; i++) minkowskiAddition(pi, maskwidth, maskheight);
    if (performMinkowskiSubtraction) for (i = 0; i < maskrepeat; i++) minkowskiSubtraction(pi, maskwidth, maskheight);
    if (performMinkowskiOpening) for (i = 0; i < maskrepeat; i++) minkowskiOpening(pi, maskwidth, maskheight);
    if (performMinkowskiClosing) for (i = 0; i < maskrepeat; i++) minkowskiClosing(pi, maskwidth, maskheight);

    if (performEdgeDetectOne) {
        /* X / (X eroded): highlights pixels removed by erosion. */
        struct portImage *pc = copyImage(pi);
        imageWriteNecessary = 1;
        minkowskiSubtraction(pc, 3, 3);
        minkowskiDivision(pi, pc);
        freeImage(pc);
    }
    if (performEdgeDetectTwo) {
        /* (X dilated) / X: highlights pixels added by dilation. */
        struct portImage *pc = copyImage(pi);
        imageWriteNecessary = 1;
        minkowskiAddition(pi, 3, 3);
        minkowskiDivision(pi, pc);
        freeImage(pc);
    }
    if (performEdgeDetectThree) {
        /* [(X+B)/(X-B)] - B with three dilate/erode rounds. */
        struct portImage *pd = copyImage(pi);
        maskrepeat = 3;
        imageWriteNecessary = 1;
        for (i = 0; i < maskrepeat; i++) {
            minkowskiAddition(pi, 3, 3);
            minkowskiSubtraction(pd, 3, 3);
        }
        minkowskiDivision(pi, pd);
        minkowskiSubtraction(pi, 3, 3);
        freeImage(pd);
    }

    if (imageWriteNecessary) writeImage(pi, out);
    freeImage(pi);
    if (in != stdin) fclose(in);
    if (out != stdout) fclose(out);
    return 0;
} /* End Main */
/*
 * Threshold `frame` into `res`, then label each connected component of the
 * thresholded (L != 0) pixels with a distinct label via a stack-based flood
 * fill, parallelised over rows with OpenMP.  On success returns 0 and stores
 * the final label counter in *largestLabel; returns 1 if thresholding fails.
 *
 * NOTE(review): several apparent data races in the parallel region —
 *   - `label` is read and `label++`-ed by all threads with no atomic/critical;
 *   - `x`/`y` are shared across threads but written by pop() inside one
 *     critical section and read outside it;
 *   - the `S`/`label` pixel flags are written outside any critical section;
 *   - the push/pop critical sections use *different* names (resQueue, popXY,
 *     pushXYm1, ...), so they do not exclude each other on the shared stack.
 * Confirm whether the single shared stack + per-row parallelism is intended.
 */
int segmentImageOmp(frame_t *frame, frame_t *res, unsigned long *largestLabel) {
    //segment the image (label each connected component a different label)
    if (thresholdImage(frame, res) != 0) {
        printf("segmentImage: thresholdImage failure code\n");
        return 1;
    }
    // DISCLAIMERS: L channel - binary map after thresholding image
    // - contains segmented image following this func
    // A channel - must be 0s after threshold
    // - "in stack" binary map use in this func only
    // B channel - must be 0s after threshold
    // Segmentation code here - watershed
    // START LABELS AT 1 (non-labeled remains at 0)
    int i, j;
    unsigned long label = 1;
    int rWidth = res->image->width;
    int rHeight = res->image->height;
    int x, y;  /* NOTE(review): shared scratch for pop(); see race note above */
    createStack();
#pragma omp parallel for
    for (i = 0; i < rHeight; i++) {
        for (j = 0; j < rWidth; j++) {
            pixel_t *P;
            pixel_t *nP;
            //pValH = i;
            //pValW = j;
            // Using pVal, we'll segment surrounding pixels with the same label.
            if (res->image->data[i*rWidth + j].L == 0) {
                // Pixel did not have a value
                //LOG_ERR("segmentImage: Continuing with seeds, pixel off at (w,h) -> (%d, %d)\n", pValW, pValH);
            } else {
                if (res->image->data[i*rWidth + j].label >= 1) {
                    //LOG_ERR("segmentImage: Continuing with seeds, pixel already labeled at (w,h) -> (%d, %d)\n", pValW, pValH);
                } else {
                    //LOG_ERR("segmentImage: Labeling connected pixels starting at (w,h) -> (%d, %d)\n", pValW, pValH);
                    // Seed the flood fill: push this pixel and mark it in-stack (S).
#pragma omp critical (resQueue)
                    push(&res->image->data[i * rWidth + j], j, i);
                    res->image->data[i*rWidth+j].S = 1;
                    // Drain the stack, labeling every reachable lit pixel.
                    while(isEmpty() != 0) {
#pragma omp critical (popXY)
                        P = pop(&x, &y);
                        if (((P->label != label) && (P->L != 0))) {
                            P->label = label;
                            //printf("\nAdding label to (%d, %d) to get (%d, %d, %d, %d, %lu)\n", y, x, P->L,P->A,P->B,P->S, P->label);
                            // Add neighboring pixels within the bounds to the stack
                            if (y-1 >= 0) {
                                nP = &res->image->data[(y-1)*rWidth+x];
                                // Check if the pixel has been in the stack
                                if (nP->S != 1) {
                                    // Check if the pixel has a value
                                    if(nP->L != 0 && nP->label != label){
#pragma omp critical (pushXYm1)
                                        push(nP, x, y-1);
                                        nP->S = 1;
                                    }
                                }
                            }
                            if (y+1 < rHeight) {
                                nP = &res->image->data[(y+1)*rWidth+x];
                                if (nP->S != 1) {
                                    if(nP->L != 0 && nP->label != label){
#pragma omp critical (pushXYp1)
                                        push(nP, x, y+1);
                                        nP->S = 1;
                                    }
                                }
                            }
                            if (x-1 >= 0) {
                                nP = &res->image->data[y*rWidth+(x-1)];
                                if (nP->S != 1) {
                                    if(nP->L != 0 && nP->label != label){
#pragma omp critical (pushXm1Y)
                                        push(nP, x-1, y);
                                        nP->S = 1;
                                    }
                                }
                            }
                            if (x+1 < rWidth) {
                                nP = &res->image->data[y*rWidth+(x+1)];
                                if (nP->S != 1) {
                                    if(nP->L != 0 && nP->label != label){
#pragma omp critical (pushXp1Y)
                                        push(nP, x+1, y);
                                        nP->S = 1;
                                    }
                                }
                            }
                        }
                    }
                }
                // NOTE(review): incremented for every lit pixel (even already-
                // labeled ones) and unsynchronized across threads — confirm.
                label++;
            }
        }
    }
#pragma omp barrier
    // Other method of labelling pixels - sequential algo
#if 0
    // segment remaining pixels by looking for neighbor nearby or creating new label
    int val1, val2, val3, val4;
    for (i = 0; i <rHeight; i++){
        for (j = 0; j < rWidth; j++) {
            val1 = 0; val2 = 0; val3 = 0; val4 = 0;
            // pixel has not been labelled yet
            if (res->image->data[i*rWidth+j].L == 1) {
                // give the current pixel the label of its neighbor or new label
                if (i-1 >= 0) val1 = res->image->data[(i-1)*rWidth+j].L;
                if (i+1 < rHeight) val2 = res->image->data[(i+1)*rWidth+j].L;
                if (j-1 >= 0) val3 = res->image->data[i*rWidth+(j-1)].L;
                if (j+1 < rWidth) val4 = res->image->data[i*rWidth+(j+1)].L;
                if (val1 > 1){
                    res->image->data[i*rWidth+j].L = val1;
                } else if (val2 > 1) {
                    res->image->data[i*rWidth+j].L = val2;
                } else if (val3 > 1) {
                    res->image->data[i*rWidth+j].L = val3;
                } else if (val4 > 1) {
                    res->image->data[i*rWidth+j].L = val4;
                } else {
                    res->image->data[i*rWidth+j].L = label;
                    label++;
                }
            }
        }
    }
#endif
    *largestLabel = label;
    return 0;
}
/*
 * process: main capture loop.  Grabs frames from `capture`, downscales and
 * preconditions them (blur / brightness / contrast), thresholds and
 * erodes/dilates per the ControlsWindow sliders, runs blob detection, and
 * overlays measurements for the contour whose aspect ratio matches objInfo
 * (the "tape").  Displays both the binary mask and the annotated frame.
 *
 * Returns 0 on normal exit (stream end or q/Q/ESC pressed); settings are
 * saved to preferenceFileName on every exit path.
 *
 * Fixes vs. previous revision:
 *  - q/Q/ESC path no longer leaks controlsWindow and now destroys the
 *    HighGUI windows, matching the stream-end exit path;
 *  - ptnum (DEBUG_BLOBS) was read-modify-written while uninitialized;
 *  - ModBlobDetector is stack-allocated (RAII) instead of new/delete per frame.
 */
int process(VideoCapture& capture) {
    long captureTime;  // capture timestamp (ms); only consumed by the commented-out frame pacing below
    cout << "Press q or escape to quit!" << endl;

    CvFont infoFont;
    cvInitFont(&infoFont, CV_FONT_HERSHEY_SIMPLEX, 1, 1);

    namedWindow(VIDEO_WINDOW_NAME, CV_WINDOW_AUTOSIZE);
    namedWindow(ERODE_PREVIEW_WIN_NAME, CV_WINDOW_NORMAL);
    resizeWindow(ERODE_PREVIEW_WIN_NAME, 320, 240);

    ControlsWindow* controlsWindow = new ControlsWindow();
    if (fileExists(preferenceFileName)) {
        loadSettings(controlsWindow, (char*)preferenceFileName);
    }

    Mat frame;
    while (true) {
        capture >> frame;
        captureTime = (int)(getTickCount()/getTickFrequency())*1000;
        if (frame.empty())
            break;

        // Downscale to a fixed width, preserving the source aspect ratio.
        int target_width = 320;
        int height = (target_width/capture.get(3 /*width*/)) * capture.get(4 /*height*/);
        resize(frame, frame, Size(target_width, height));

        if (controlsWindow->getBlurDeviation() > 0) {
            GaussianBlur(frame, frame, Size(GAUSSIAN_KERNEL, GAUSSIAN_KERNEL),
                         controlsWindow->getBlurDeviation());
        }

        // Apply brightness and contrast.
        frame.convertTo(frame, -1, controlsWindow->getContrast(), controlsWindow->getBrightness());

        // Color threshold, then clean up with erode/dilate and binarize.
        Mat maskedImage = thresholdImage(controlsWindow, frame);
        Mat erodedImage = erodeDilate(maskedImage, controlsWindow);
        Mat erodedImageBinary;
        cvtColor(erodedImage, erodedImageBinary, COLOR_BGR2GRAY);
        threshold(erodedImageBinary, erodedImageBinary, 0, 255, CV_THRESH_BINARY);
        if (controlsWindow->getInvert()) {
            erodedImageBinary = 255 - erodedImageBinary;
        }

        // Blob detector tuned for large, bright (255) blobs only.
        cv::SimpleBlobDetector::Params params;
        params.minDistBetweenBlobs = 50.0f;
        params.filterByInertia = false;
        params.filterByConvexity = false;
        params.filterByColor = true;
        params.filterByCircularity = false;
        params.filterByArea = true;
        params.minArea = 1000.0f;
        params.maxArea = 100000.0f;
        params.blobColor = 255;

        vector<KeyPoint> centers;
        vector<vector<Point>> contours;
        // FIX: stack allocation (RAII); previously new/delete on every frame.
        ModBlobDetector blobDetector(params);
        vector<vector<Point>> contourHulls;
        vector<RotatedRect> contourRects;
        blobDetector.findBlobs(erodedImageBinary, erodedImageBinary, centers, contours);

        // Build the convex hull and minimum-area rect for each detected contour.
        for (const vector<Point>& ctpts : contours) {
            vector<Point> hull;
            convexHull(ctpts, hull);
            contourHulls.push_back(hull);
            contourRects.push_back(minAreaRect(hull));
        }

#ifdef DEBUG_BLOBS
        drawContours(frame, contours, -1, Scalar(128,255,128), 2, CV_AA);
        drawContours(frame, contourHulls, -1, Scalar(255, 128,0), 2, CV_AA);
        int ptnum = 0;  // FIX: was uninitialized before ptnum++
        for (const KeyPoint& pt : centers) {
            Scalar color(255, 0, 255);
            circle(frame, pt.pt, 5, color, -1 /*filled*/, CV_AA);
            circle(frame, pt.pt, pt.size, color, 1, CV_AA);
            ptnum++;
        }
#endif

        for (RotatedRect rr : contourRects) {
            Point2f points[4];
            rr.points(points);
            float side1 = distance(points[0], points[1]);
            float side2 = distance(points[1], points[2]);
            float shortestSide = min(side1, side2);
            float longestSide = max(side1, side2);
            float aspectRatio = longestSide/shortestSide;
            int b = 0;
            // A rect "is tape" when its aspect ratio is within 20% of the
            // reference ratio from object.xml (0 means no valid reference).
            bool isTape = objInfo.aspectRatio == 0 ? false
                : abs(objInfo.aspectRatio - aspectRatio) < 0.2*objInfo.aspectRatio;
            /*
             * TODO
             * Make a list of possible tape candidates
             * Use tape candidate with smallest difference in ratio to the real ratio as the tape
             */
            if (isTape) {
                b = 255;
                string widthText = "Width (px): ";
                widthText.append(toString(longestSide));
                string heightText = "Height (px): ";
                heightText.append(toString(shortestSide));
                string rotText = "Rotation (deg): ";
                rotText.append(toString(abs((int)rr.angle)));
                string distText;
                if (camSettings.focalLength == -1) {
                    distText = "Focal length not defined";
                } else {
                    // Pinhole model: distance = realWidth * focalLength / pixelWidth.
                    float dist = objInfo.width * camSettings.focalLength / longestSide;
                    distText = "Distance (cm): ";
                    distText.append(toString(dist));
                }
                putText(frame, widthText, Point(0, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, heightText, Point(0, 40), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, rotText, Point(0, 60), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, distText, Point(0, 80), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
            }
            rotated_rect(frame, rr, Scalar(b, 0, 255));
            if (isTape)
                break;  // annotate only the first tape match
        }

        if (objInfo.aspectRatio == 0) {
            putText(frame, "Invalid object info (object.xml)", Point(0, 20),
                    CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
        }

        imshow(ERODE_PREVIEW_WIN_NAME, erodedImageBinary);
        imshow(VIDEO_WINDOW_NAME, frame);

        //int waitTime = max((int)(((1.0/framerate)*1000)
        //                   - ((int)(getTickCount()/getTickFrequency())*1000 - captureTime))
        //              , 1);
        char key = (char)waitKey(1);
        switch (key) {
        case 'q':
        case 'Q':
        case 27: //escape
            saveSettings(controlsWindow, (char*)preferenceFileName);
            // FIX: this exit path previously leaked controlsWindow and left
            // the HighGUI windows open.
            delete controlsWindow;
            destroyAllWindows();
            return 0;
        default:
            break;
        }
        std::this_thread::yield();
    }

    saveSettings(controlsWindow, (char*)preferenceFileName);
    delete controlsWindow;
    destroyAllWindows();
    return 0;
}
int main(int argc, char *argv[]){ if (argc < 3) { printf("USAGE: ./fullTest <first_image_filename> <second_image_filename>\n"); return 1; } double startTime = CycleTimer::currentSeconds(); double deltaTime; char *filename1 = argv[1]; char *filename2 = argv[2]; frame_t *frame1 = (frame_t *) malloc(sizeof(struct frame_s)); frame_t *frame2 = (frame_t *) malloc(sizeof(struct frame_s)); frame_t *res1 = (frame_t *) malloc(sizeof(struct frame_s)); frame_t *res2 = (frame_t *) malloc(sizeof(struct frame_s)); frame_t *res3 = (frame_t *) malloc(sizeof(struct frame_s)); frame_t *res4 = (frame_t *) malloc(sizeof(struct frame_s)); readImageFrame(frame1, filename1); readImageFrame(frame2, filename2); res1 = copyFrame(frame1); res2 = copyFrame(frame1); res3 = copyFrame(frame2); res4 = copyFrame(frame2); deltaTime = CycleTimer::currentSeconds() - startTime; printf("CopyFrame finished, elapsed time = %f seconds\n", deltaTime); frameToJPG(frame1, "full1.jpg"); frameToJPG(frame2, "full2.jpg"); startTime = CycleTimer::currentSeconds(); if (frameSubtraction(frame1, frame2, res1) != 0) { printf("Error in frame subtraction\n"); } deltaTime = CycleTimer::currentSeconds() - startTime; printf("FrameSubtraction finished, time = %f seconds\n", deltaTime); frameToJPG(res1, "full3.jpg"); startTime = CycleTimer::currentSeconds(); if (thresholdImage(res1, res2) != 0) { printf("Error in thresholdImage\n"); } deltaTime = CycleTimer::currentSeconds() - startTime; printf("thresholdImage finished, time = %f seconds\n", deltaTime); frameToJPG(res2, "full4.jpg"); startTime = CycleTimer::currentSeconds(); if (blobDetection(res2) != 0) { printf("Error in blob detection\n"); } deltaTime = CycleTimer::currentSeconds() - startTime; printf("BlobDetection Finished, time = %f seconds\n", deltaTime); // res2 = copyFrame(res1); if (drawBoxOnImage(res2, res3) != 0) { printf("Error in draw box on image\n"); exit(1); } printf("Writing frame\n"); frameToJPG(res3, "full5.jpg"); printf("Write to frame complete, please 
check file = full5.jpg\n"); startTime = CycleTimer::currentSeconds(); // merge boxes if (mergeBoxes(res2) != 0) { printf("Error in mergeBoxes\n"); exit(1); } deltaTime = CycleTimer::currentSeconds() - startTime; printf("mergeBoxes Finished, time = %f seconds\n", deltaTime); if (drawBoxOnImage(res2, res4) != 0) { printf("Error in draw box on image\n"); exit(1); } printf("Writing frame\n"); frameToJPG(res4, "full6.jpg"); printf("Write to frame complete, please check file = full6.jpg\n"); printf("Freeing frames\n"); int err = freeFrame(frame1); err += freeFrame(frame2); err += freeFrame(res1); err += freeFrame(res2); err += freeFrame(res3); err += freeFrame(res4); if (err != 0) { printf("Unable to free frame\n"); return 1; } return 0; }
int segmentImage(frame_t *frame, frame_t *res, int *largestLabel) { //segment the image (label each connected component a different label) if (thresholdImage(frame, res) != 0) { printf("segmentImage: thresholdImage failure code\n"); return 1; } // Segmentation code here - watershed // START LABELS AT 2 (non-labeled remains at 0) int i, j, pValW, pValH, label = 2; int rWidth = res->image->width; int rHeight = res->image->height; pixel_t *P; int x, y; createStack(); for (i = 0; i < rHeight; i++) { for (j = 0; j < rWidth; j++) { pValH = i; pValW = j; // Using pVal, we'll segment surrounding pixels with the same label. if (res->image->data[pValH*rWidth + pValW].L == 0) { // Pixel did not have a value //LOG_ERR("segmentImage: Continuing with seeds, pixel off at (w,h) -> (%d, %d)\n", pValW, pValH); } else if (res->image->data[pValH*rWidth + pValW].L > 1) { //LOG_ERR("segmentImage: Continuing with seeds, pixel already labeled at (w,h) -> (%d, %d)\n", pValW, pValH); } else { LOG_ERR("segmentImage: Labeling connected pixels starting at (w,h) -> (%d, %d)\n", pValW, pValH); // Add pixels to stack push(&res->image->data[pValH * rWidth + pValW], pValW, pValH); while(isEmpty() != 0) { P = pop(&x, &y); if (P->L == 1) { P->L = label; // Add neighboring pixels within the bounds to the stack if (y-1 >= 0) { if(res->image->data[(y-1)*rWidth+x].L == 1){ push(&res->image->data[(y-1)*rWidth+x], x, y-1); } } if (y+1 < rHeight) { if (res->image->data[(y+1)*rWidth+x].L == 1) { push(&res->image->data[(y+1)*rWidth+x], x, y+1); } } if (x-1 >= 0) { if(res->image->data[y*rWidth+(x-1)].L == 1) { push(&res->image->data[y*rWidth+(x-1)], x-1, y); } } if (x+1 < rWidth) { if(res->image->data[y*rWidth+(x+1)].L == 1) { push(&res->image->data[y*rWidth+(x+1)], x+1, y); } } } } } label++; } } // Other method of labelling pixels - sequential algo #if 0 // segment remaining pixels by looking for neighbor nearby or creating new label int val1, val2, val3, val4; for (i = 0; i <rHeight; i++){ for (j = 0; j < 
rWidth; j++) { val1 = 0; val2 = 0; val3 = 0; val4 = 0; // pixel has not been labelled yet if (res->image->data[i*rWidth+j].L == 1) { // give the current pixel the label of its neighbor or new label if (i-1 >= 0) val1 = res->image->data[(i-1)*rWidth+j].L; if (i+1 < rHeight) val2 = res->image->data[(i+1)*rWidth+j].L; if (j-1 >= 0) val3 = res->image->data[i*rWidth+(j-1)].L; if (j+1 < rWidth) val4 = res->image->data[i*rWidth+(j+1)].L; if (val1 > 1){ res->image->data[i*rWidth+j].L = val1; } else if (val2 > 1) { res->image->data[i*rWidth+j].L = val2; } else if (val3 > 1) { res->image->data[i*rWidth+j].L = val3; } else if (val4 > 1) { res->image->data[i*rWidth+j].L = val4; } else { res->image->data[i*rWidth+j].L = label; label++; } } } } #endif *largestLabel = label; return 0; }