Mat floodFillPostprocess( Mat& img )
{
    /** Finds connected components in the input image img. The similarity is
        based on the color and intensity of neighbouring pixels. Filters the
        connected components based on size and color (here the color bounds
        are loose).
        @param  img     The input image.
        @return maskOut The mask (single-channel, binary image) representing
                the connected components, filtered on size: "appropriately
                sized" blobs are kept, others discarded. */
    Mat maskOut( img.rows+2, img.cols+2, CV_8UC1, Scalar::all(0) );
    Mat mask( img.rows+2, img.cols+2, CV_8UC1, Scalar::all(0) );
    Mat maskLocal( img.rows+2, img.cols+2, CV_8UC1, Scalar::all(0) );
    //Scalar newVal( 200, 150, 100);
    Scalar lo = Scalar(loDiff, loDiff, loDiff), up = Scalar(upDiff, upDiff, upDiff);
    int flags = connectivity + (newMaskVal << 8) + CV_FLOODFILL_FIXED_RANGE;

    for( int y = 0; y < img.rows; y++ )
    {
        for( int x = 0; x < img.cols; x++ )
        {
            if( !withinBounds(x, y, img.cols, img.rows) )
                continue;
            // The fill mask is offset by (1,1) relative to the image, so
            // pixel (x,y) corresponds to mask(y+1, x+1). Only seed a new
            // fill from pixels no previous fill has reached.
            if( mask.at<uchar>(y+1, x+1) == 0 )
            {
                maskLocal = Mat::zeros( mask.size(), mask.type() );
                Scalar newVal( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
                int area = floodFill( img, maskLocal, Point(x, y), newVal, 0, lo, up, flags );
                bitwise_or( mask, maskLocal, mask );
                if( area > 0 && area < 800 )   // size filter; was 600
                {
                    bitwise_or( maskOut, maskLocal, maskOut );
                }
            }
        }
    }
    return maskOut;
}
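/* A minimal usage sketch for floodFillPostprocess() above, not part of the
   original source. It assumes the globals the function relies on (loDiff,
   upDiff, connectivity, newMaskVal, rng, withinBounds) are defined as in the
   surrounding code. The mean-shift pass and its parameters are illustrative;
   any smoothing that flattens regions makes the fills less fragmented. */
void floodFillPostprocessDemo( const string& path )
{
    Mat img = imread( path );
    if( img.empty() )
        return;
    Mat segmented;
    pyrMeanShiftFiltering( img, segmented, 10, 35, 2 );   // flatten color regions first
    Mat blobMask = floodFillPostprocess( segmented );     // note: 2 px larger than img per axis
    imshow( "appropriately sized blobs", blobMask );
    waitKey(0);
}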
/**
 * @function goodFeaturesToTrack_Demo
 * @brief Apply the Shi-Tomasi corner detector
 */
void goodFeaturesToTrack_Demo( int, void* )
{
    if( maxCorners < 1 ) { maxCorners = 1; }

    /// Parameters for the Shi-Tomasi algorithm
    vector<Point2f> corners;
    double qualityLevel = 0.01;
    double minDistance = 10;
    int blockSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;

    /// Copy the source image
    Mat copy;
    copy = src.clone();

    /// Apply corner detection
    goodFeaturesToTrack( src_gray, corners, maxCorners, qualityLevel, minDistance,
                         Mat(), blockSize, useHarrisDetector, k );

    /// Draw the detected corners
    cout << "** Number of corners detected: " << corners.size() << endl;
    int r = 4;
    for( size_t i = 0; i < corners.size(); i++ )
    {
        circle( copy, corners[i], r,
                Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 );
    }

    /// Show what you got
    namedWindow( source_window, WINDOW_AUTOSIZE );
    imshow( source_window, copy );
}
Partes( int quantidade )
{
    n_partes = quantidade;
    dprops.resize( n_partes + 1 );
    dpoints.resize( n_partes + 1 );
    divContours.resize( n_partes );

    cores.resize( n_partes );
    for( int i = 0; i < n_partes; i++ )
        cores[i] = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );

    tamanho.resize( n_partes + 1 );
    for( int i = 0; i < n_partes + 1; i++ )
        tamanho[i].resize( 2 );

    fitL.resize( n_partes );
    boxes.resize( n_partes );
}
/**
 * Test function for finding and drawing contours in random colors.
 */
JNIEXPORT void JNICALL Java_vrlab_foodui_FoodUiJNI_threshCallback(
        JNIEnv*, jobject, jlong addrGray, jlong addrRgba )
{
    // variable declaration
    Mat& mGr  = *(Mat*)addrGray;
    Mat& mRgb = *(Mat*)addrRgba;
    Mat mCannyOutput;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    // Detect edges using Canny and find contours
    Canny( mGr, mCannyOutput, THRESH, THRESH*2, 3 );
    findContours( mCannyOutput, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

    // Draw contours
    for( size_t i = 0; i < contours.size(); i++ )
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        drawContours( mRgb, contours, (int)i, color, 2, 8, hierarchy, 0, Point() );
    }
}
/**
 * @function thresh_callback
 */
void thresh_callback(int, void*)
{
    Mat threshold_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    /// Detect edges using threshold
    threshold(src_gray, threshold_output, thresh, 255, THRESH_BINARY);

    /// Find contours
    findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    /// Approximate contours to polygons + get bounding rects and circles
    vector<vector<Point> > contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    vector<Point2f> center(contours.size());
    vector<float> radius(contours.size());

    for (size_t i = 0; i < contours.size(); i++)
    {
        approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
        boundRect[i] = boundingRect(Mat(contours_poly[i]));
        minEnclosingCircle(contours_poly[i], center[i], radius[i]);
    }

    /// Draw polygonal contours + bounding rects + circles
    Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
    for (size_t i = 0; i < contours.size(); i++)
    {
        Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
        drawContours(drawing, contours_poly, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point());
        rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0);
        circle(drawing, center[i], (int)radius[i], color, 2, 8, 0);
    }

    /// Show in a window
    namedWindow("Contours", CV_WINDOW_AUTOSIZE);
    imshow("Contours", drawing);
}
void run_stress()
{
    RNG rng;

    for( int i = 0; i < 10; ++i )
    {
        int winSize = cvRound(rng.uniform(2, 11)) * 2 + 1;   // odd window size in [5, 21]

        for( int j = 0; j < 10; ++j )
        {
            int ndisp = cvRound(rng.uniform(5, 32)) * 8;

            for( int s = 0; s < 10; ++s )
            {
                int w = cvRound(rng.uniform(1024, 2048));
                int h = cvRound(rng.uniform(768, 1152));

                for( int p = 0; p < 2; ++p )
                {
                    //int winSize = winsz[i];
                    //int disp = disps[j];
                    Size imgSize(w, h); //res[s];
                    int preset = p;

                    printf("Preset = %d, ndisp = %d, winsz = %d, width = %d, height = %d\n",
                           p, ndisp, winSize, imgSize.width, imgSize.height);

                    GpuMat l(imgSize, CV_8U);
                    GpuMat r(imgSize, CV_8U);
                    GpuMat disparity;

                    StereoBM_GPU bm(preset, ndisp, winSize);
                    bm(l, r, disparity);
                }
            }
        }
    }
}
/**
 * @function Drawing_Random_Rectangles
 */
int Drawing_Random_Rectangles( Mat image, char* window_name, RNG rng )
{
    Point pt1, pt2;
    int lineType = 8;
    int thickness = rng.uniform( -3, 10 );   // any negative value clamps to -1 (filled)

    for( int i = 0; i < NUMBER; i++ )
    {
        pt1.x = rng.uniform( x_1, x_2 );
        pt1.y = rng.uniform( y_1, y_2 );
        pt2.x = rng.uniform( x_1, x_2 );
        pt2.y = rng.uniform( y_1, y_2 );

        rectangle( image, pt1, pt2, randomColor(rng), MAX( thickness, -1 ), lineType );

        imshow( window_name, image );
        if( waitKey( DELAY ) >= 0 )
            { return -1; }
    }
    return 0;
}
EDfield::EDfield( int resolution, int scale, float sigma, float amp )
    : resolution(resolution), scale(scale)
{
    RNG rng;
    rf.resize(2);
    // Fill each of the two field components with uniform noise in [-amp, amp),
    // then smooth it with a Gaussian.
    for( int k = 0; k < 2; k++ )
    {
        rf[k].resize( resolution * resolution );
        for( int i = 0; i < resolution; i++ )
        {
            for( int j = 0; j < resolution; j++ )
            {
                rf[k][i*resolution + j] = rng.uniform( -amp, amp );
            }
        }
        convolve_gaussian( rf[k], sigma, resolution );
    }
}
specimen_t population::mutate( specimen_t indi )
{
    uint64_t iter;
    for( iter = 0; iter < NUMBER_GENES; iter++ )
    {
        // uniform(0, 1/mu_r_) < 1 flips each gene with probability ~mu_r_
        if( rudi_.uniform( 0, (int)(1/mu_r_) ) < 1 )
        {
            indi.gen.flip( iter );
        }
    }
    indi.fit    = calcFitness( indi.gen );
    indi.calced = fitness_calculation_counter_;
    return indi;
}
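/* Side note, not from the original source: the integer trick above rounds
   1/mu_r_ down, so the effective per-gene mutation rate sits slightly above
   mu_r_. A sketch of a drop-in alternative that samples the probability
   directly (the rudi_-style cv::RNG is assumed): */
inline bool shouldMutate( cv::RNG& rudi, double mutationRate )
{
    // uniform(0.0, 1.0) < rate is true with probability equal to rate
    return rudi.uniform( 0.0, 1.0 ) < mutationRate;
}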
/**
 * @function thresh_callback
 */
void thresh_callback(int, void* )
{
    Mat canny_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    /// Detect edges using Canny
    Canny( src_gray, canny_output, thresh, thresh*2, 3 );

    /// Find contours
    findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

    /// Draw contours
    Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        drawContours( drawing, contours, (int)i, color, 2, 8, hierarchy, 0, Point() );
    }

    /// Show in a window
    namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
    imshow( "Contours", drawing );
}
void PatchGenerator::generateRandomTransform( Point2f srcCenter, Point2f dstCenter,
                                              Mat& transform, RNG& rng, bool inverse ) const
{
    double lambda1 = rng.uniform(lambdaMin, lambdaMax);
    double lambda2 = rng.uniform(lambdaMin, lambdaMax);
    double theta = rng.uniform(thetaMin, thetaMax);
    double phi = rng.uniform(phiMin, phiMax);

    // Calculate random parameterized affine transformation A,
    // A = T(patch center) * R(theta) * R(phi)' *
    //     S(lambda1, lambda2) * R(phi) * T(-pt)
    double st = sin(theta);
    double ct = cos(theta);
    double sp = sin(phi);
    double cp = cos(phi);
    double c2p = cp*cp;
    double s2p = sp*sp;

    double A = lambda1*c2p + lambda2*s2p;
    double B = (lambda2 - lambda1)*sp*cp;
    double C = lambda1*s2p + lambda2*c2p;

    double Ax_plus_By = A*srcCenter.x + B*srcCenter.y;
    double Bx_plus_Cy = B*srcCenter.x + C*srcCenter.y;

    transform.create(2, 3, CV_64F);
    Mat_<double>& T = (Mat_<double>&)transform;
    T(0,0) = A*ct - B*st;
    T(0,1) = B*ct - C*st;
    T(0,2) = -ct*Ax_plus_By + st*Bx_plus_Cy + dstCenter.x;
    T(1,0) = A*st + B*ct;
    T(1,1) = B*st + C*ct;
    T(1,2) = -st*Ax_plus_By - ct*Bx_plus_Cy + dstCenter.y;

    if( inverse )
        invertAffineTransform(T, T);
}
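/* Not from the original source: a quick numeric check of the decomposition
   quoted in the comment above. It composes
   T(dstCenter) * R(theta) * R(phi)' * S(lambda1, lambda2) * R(phi) * T(-srcCenter)
   from explicit 3x3 homogeneous matrices; the upper 2x3 block should match
   the closed-form transform produced by generateRandomTransform(). */
static cv::Mat rot3( double a )                 // homogeneous 2D rotation
{
    double m[9] = { cos(a), -sin(a), 0,
                    sin(a),  cos(a), 0,
                    0,       0,      1 };
    return cv::Mat( 3, 3, CV_64F, m ).clone();
}
static cv::Mat trans3( double tx, double ty )   // homogeneous 2D translation
{
    double m[9] = { 1, 0, tx,
                    0, 1, ty,
                    0, 0, 1 };
    return cv::Mat( 3, 3, CV_64F, m ).clone();
}
static cv::Mat composedTransform( double l1, double l2, double theta, double phi,
                                  cv::Point2f srcC, cv::Point2f dstC )
{
    cv::Mat S = ( cv::Mat_<double>(3, 3) << l1, 0, 0,  0, l2, 0,  0, 0, 1 );
    cv::Mat M = trans3( dstC.x, dstC.y ) * rot3( theta ) * rot3( phi ).t()
              * S * rot3( phi ) * trans3( -srcC.x, -srcC.y );
    return M.rowRange( 0, 2 );   // the 2x3 affine part
}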
virtual int run_case( int depth, size_t matCount, const Size& size, RNG& rng )
{
    const int maxMatChannels = 10;

    vector<Mat> src(matCount);
    int channels = 0;
    for( size_t i = 0; i < src.size(); i++ )
    {
        Mat m( size, CV_MAKETYPE(depth, rng.uniform(1, maxMatChannels)) );
        rng.fill( m, RNG::UNIFORM, 0, 100, true );
        channels += m.channels();
        src[i] = m;
    }

    Mat dst;
    merge( src, dst );

    // check result
    stringstream commonLog;
    commonLog << "Depth " << depth << " :";
    if( dst.depth() != depth )
    {
        ts->printf( cvtest::TS::LOG, "%s incorrect depth of dst (%d instead of %d)\n",
                    commonLog.str().c_str(), dst.depth(), depth );
        return cvtest::TS::FAIL_INVALID_OUTPUT;
    }
    if( dst.size() != size )
    {
        ts->printf( cvtest::TS::LOG, "%s incorrect size of dst (%d x %d instead of %d x %d)\n",
                    commonLog.str().c_str(), dst.rows, dst.cols, size.height, size.width );
        return cvtest::TS::FAIL_INVALID_OUTPUT;
    }
    if( dst.channels() != channels )
    {
        ts->printf( cvtest::TS::LOG, "%s incorrect channels count of dst (%d instead of %d)\n",
                    commonLog.str().c_str(), dst.channels(), channels );
        return cvtest::TS::FAIL_INVALID_OUTPUT;
    }

    int diffElemCount = calcDiffElemCount( src, dst );
    if( diffElemCount > 0 )
    {
        ts->printf( cvtest::TS::LOG, "%s there are incorrect elements in dst (their fraction is %f)\n",
                    commonLog.str().c_str(), static_cast<float>(diffElemCount)/(channels*size.area()) );
        return cvtest::TS::FAIL_INVALID_OUTPUT;
    }

    return cvtest::TS::OK;
}
void population::selector( GENO_TYPE& nana )
{
    uint64_t iter;

    // Roulette-wheel selection: sig_fit_ holds the cumulative fitness sums,
    // so a uniform draw in [0, total fitness) lands in an individual's slot
    // with probability proportional to its fitness.
    FITNESS_TYPE temp = rudi_.uniform( (FITNESS_TYPE)0.0, (FITNESS_TYPE)sig_fit_[pop_size_-1] );

    for( iter = 0; iter < pop_size_; iter++ )
    {
        if( temp <= sig_fit_[iter] )
        {
            nana = pop_[iter].gen;
            break;
        }
    }
}
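/* For context, a sketch (not from the original source, names hypothetical) of
   how a cumulative-fitness array like sig_fit_ is built from per-individual
   fitness values, so the uniform draw above maps to fitness-proportional
   selection: */
#include <vector>
template <typename F>
void buildCumulativeFitness( const std::vector<F>& fitness, std::vector<F>& cumulative )
{
    cumulative.resize( fitness.size() );
    F running = F(0);
    for( size_t i = 0; i < fitness.size(); i++ )
    {
        running += fitness[i];      // assumes non-negative fitness values
        cumulative[i] = running;    // cumulative.back() == total fitness
    }
}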
void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
    // Random homography close to the identity: mild scale/shear in the upper
    // 2x2 block, translation up to 10% of the image size, tiny perspective terms.
    H.create(3, 3, CV_32FC1);
    H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f)*src.cols;
    H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f)*src.rows;
    H.at<float>(2,0) = rng.uniform(-1e-4f, 1e-4f);
    H.at<float>(2,1) = rng.uniform(-1e-4f, 1e-4f);
    H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);

    warpPerspective( src, dst, H, src.size() );
}
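/* A minimal usage sketch, not from the original source: generate a randomly
   warped copy of an image together with its ground-truth homography, e.g. to
   test a feature matcher. Assumes warpPerspectiveRand() above is visible. */
void makeWarpedPair( const Mat& img )
{
    RNG rng( 0x12345 );   // fixed seed for a reproducible warp
    Mat warped, H;
    warpPerspectiveRand( img, warped, H, rng );
    // H maps img coordinates to warped coordinates: a keypoint match (p, q)
    // is an inlier when q is close to the perspective projection of H * p.
    imshow( "original", img );
    imshow( "warped", warped );
    waitKey(0);
}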
Mat BoundaryDetector::thresh_callback( int, void* )
{
    Mat threshold_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    /// Detect edges using threshold
    threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );

    /// Find contours
    findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
    // Using CV_RETR_EXTERNAL instead of CV_RETR_TREE would return only the outermost contours:
    //findContours( threshold_output, contours, hierarchy, CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0) );

    /// Approximate contours to polygons + get bounding rects and circles
    vector<vector<Point> > contours_poly( contours.size() );
    vector<Rect> boundRect( contours.size() );
    vector<Point2f> center( contours.size() );
    vector<float> radius( contours.size() );

    double largest_area = 0;
    int largest_contour_index = 0;
    Rect bounding_rect;

    for( size_t i = 0; i < contours.size(); i++ )
    {
        double a = contourArea( contours[i], false );      // area of this contour
        if( a > largest_area )
        {
            largest_area = a;
            largest_contour_index = (int)i;                // store the index of the largest contour
            bounding_rect = boundingRect( contours[i] );   // bounding rectangle of the biggest contour
        }
        approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
        boundRect[i] = boundingRect( Mat(contours_poly[i]) );
        minEnclosingCircle( contours_poly[i], center[i], radius[i] );
    }

    /// Draw polygonal contours + bounding rects + circles
    Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        //drawContours( src, contours_poly, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point() );
        //rectangle( src, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
        //circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
    }

    Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
    //drawContours( src, contours, largest_contour_index, color, CV_FILLED, 8, hierarchy ); // draw the largest contour using the stored index
    rectangle( src, bounding_rect, Scalar(0, 255, 0), 1, 8, 0 );

    Mat croppedImage = src( bounding_rect );
    return croppedImage;
}
int ParticleGroup::resampleParticle()
{
    int particleNum = ParticleList.size();
    double *cumPdf = new double[particleNum];
    std::vector<Particle>::iterator particleIter;
    std::vector<Particle> newParticleList;

    // Build the cumulative distribution of the particle weights.
    double sum = 0.0;
    int index = 0;
    for( particleIter = ParticleList.begin(); particleIter != ParticleList.end(); ++particleIter )
    {
        sum += particleIter->weight;
        cumPdf[index] = sum;
        index++;
    }

    // Resample with replacement: draw from the cumulative distribution and
    // jitter each resampled particle with Gaussian position noise.
    particleIter = ParticleList.begin();
    RNG rng;
    for( int i = 0; i < particleNum; i++ )
    {
        double randNum = rng.uniform( 0.0, sum );
        int j;
        for( j = 0; j < particleNum; j++ )
        {
            if( cumPdf[j] >= randNum )
                break;
        }
        Particle p( (particleIter+j)->xPos, (particleIter+j)->yPos,
                    (particleIter+j)->width, (particleIter+j)->height );
        p.weight = (particleIter+j)->weight;
        p.xPos = p.xPos + rng.gaussian( xNoise );
        p.yPos = p.yPos + rng.gaussian( yNoise );
        newParticleList.push_back( p );
    }

    ParticleList = newParticleList;
    delete [] cumPdf;
    return 0;
}
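/* Side note, not from the original source: the inner linear scan is O(N) per
   draw. Because cumPdf is nondecreasing, std::lower_bound finds the same
   index (the first entry >= randNum) in O(log N): */
#include <algorithm>
static int sampleIndexFromCdf( const double* cumPdf, int n, double randNum )
{
    const double* it = std::lower_bound( cumPdf, cumPdf + n, randNum );
    return (int)(it - cumPdf);   // matches j from the loop above
}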
/*
GENO_TYPE population::discretize( particle_t par )
{
    GENO_TYPE genie;
    uint64_t iter;
    for( iter = 0; iter < NUMBER_ATTRIBUTES; iter++ )
    {
        // if ( par.pos[iter] > 0.5 )
        // if ( rudi_.uniform((double)0.0, (double)1.0) < (double)( 1.0 / (1.0 + exp(par.cc.vel[jter] * -1.0)) ) )
            genie[iter] = 1;
        else
            genie[iter] = 0;
    }
    return genie;
}
*/

void population::discretize( specimen_t& par )
{
    uint64_t iter;
    for( iter = 0; iter < NUMBER_ATTRIBUTES; iter++ )
    {
        // The sigmoid of the velocity gives the probability of setting the
        // bit (binary-PSO style discretization).
        if( rudi_.uniform( (double)0.0, (double)1.0 ) < (double)( 1.0 / (1.0 + exp(par.cc.vel[iter] * -1.0)) ) )
        {
            par.cc.pos[iter] = 1.0;
            par.gen[iter] = 1;
        }
        else
        {
            par.cc.pos[iter] = 0.0;
            par.gen[iter] = 0;
        }
    }
}
void AddSaltAndPepperNoise( const Mat& src, Mat& dest, double threshold )
{
    // Note: iterates one uchar per column, so this assumes a single-channel image.
    Mat temp = src.clone();
    int rows = temp.rows;
    int cols = temp.cols;
    RNG rng;
    for( int i = 0; i < rows; i++ )
    {
        uchar* row_pointer = temp.ptr( i );
        for( int j = 0; j < cols; j++ )
        {
            double rnd = rng.uniform( (double)0, (double)1 );
            if( rnd < threshold )
            {
                row_pointer[j] = 0;       // pepper
            }
            else if( rnd > 1 - threshold )
            {
                row_pointer[j] = 255;     // salt
            }
        }
    }
    dest = temp;
}
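/* A minimal usage sketch, not from the original source: corrupt a grayscale
   image with ~5% salt and ~5% pepper, then remove most of it with a median
   filter, the classic counter to this noise model. */
void saltAndPepperDemo( const Mat& gray )
{
    Mat noisy, cleaned;
    AddSaltAndPepperNoise( gray, noisy, 0.05 );
    medianBlur( noisy, cleaned, 3 );   // a 3x3 median removes isolated outliers
    imshow( "noisy", noisy );
    imshow( "median filtered", cleaned );
    waitKey(0);
}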
void myShiTomasi_function( int, void* )
{
    myShiTomasi_copy = src.clone();

    if( myShiTomasi_qualityLevel < 1 ) { myShiTomasi_qualityLevel = 1; }

    for( int j = 0; j < src_gray.rows; j++ )
    {
        for( int i = 0; i < src_gray.cols; i++ )
        {
            if( myShiTomasi_dst.at<float>(j, i) > myShiTomasi_minVal
                + ( myShiTomasi_maxVal - myShiTomasi_minVal ) * myShiTomasi_qualityLevel / max_qualityLevel )
            {
                circle( myShiTomasi_copy, Point(i, j), 4,
                        Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) ), -1, 8, 0 );
            }
        }
    }
    imshow( myShiTomasi_window, myShiTomasi_copy );
}
void Camera::showBiggest()
{
    cuda::GpuMat frame_gpu, frame_hsv_gpu, descriptors_scene;
    cap >> frame;
    frame_gpu.upload( frame );
    cuda::cvtColor( frame_gpu, frame_hsv_gpu, CV_BGR2HSV, 4 );
    frame_hsv_gpu.download( frame_hsv );
    inRange( frame_hsv, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgSave );

#ifdef DEBUG
    imshow( "Tresh", imgSave );
#endif

    frame_hsv_gpu.upload( imgSave );
    // Mat cannyOutput;

    // Morphological opening then closing on the GPU mask: the filters are
    // created once and applied per iteration. The inRange() mask is CV_8UC1.
    Mat k3 = getStructuringElement( MORPH_ELLIPSE, Size(3, 3) );
    Mat k8 = getStructuringElement( MORPH_ELLIPSE, Size(8, 8) );
    Ptr<cuda::Filter> erode3  = cuda::createMorphologyFilter( MORPH_ERODE,  CV_8UC1, k3 );
    Ptr<cuda::Filter> dilate3 = cuda::createMorphologyFilter( MORPH_DILATE, CV_8UC1, k3 );
    Ptr<cuda::Filter> dilate8 = cuda::createMorphologyFilter( MORPH_DILATE, CV_8UC1, k8 );
    Ptr<cuda::Filter> erode8  = cuda::createMorphologyFilter( MORPH_ERODE,  CV_8UC1, k8 );
    cuda::GpuMat morphTmp;
    for( int i = 0; i < ErodeDilate; i++ )
    {
        erode3->apply ( frame_hsv_gpu, morphTmp );   // open: erode then dilate
        dilate3->apply( morphTmp, frame_hsv_gpu );
        dilate8->apply( frame_hsv_gpu, morphTmp );   // close: dilate then erode
        erode8->apply ( morphTmp, frame_hsv_gpu );
    }
    /* CPU equivalent:
    erode( imgSave, imgSave, getStructuringElement(MORPH_ELLIPSE, Size(3, 3)) );
    dilate( imgSave, imgSave, getStructuringElement(MORPH_ELLIPSE, Size(3, 3)) );
    dilate( imgSave, imgSave, getStructuringElement(MORPH_ELLIPSE, Size(8, 8)) );
    erode( imgSave, imgSave, getStructuringElement(MORPH_ELLIPSE, Size(8, 8)) );
    */

    //Mat dst;
    frame_hsv_gpu.download( imgSave );
    findContours( imgSave, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

    Mat drawing = Mat::zeros( imgSave.size(), CV_8UC3 );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        drawContours( drawing, contours, (int)i, color, 2, 8, hierarchy, 0, Point() );
    }

#ifdef DEBUG
    imshow( "Drawing", drawing );
    imshow( "Control", frame_hsv );
#endif

    // if (good_matches.size() >= 4) {
    color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
    Mat biggest = Mat::zeros( imgSave.size(), CV_8UC3 );
    drawContours( biggest, contours, getBiggest(), color, 2, 8, hierarchy, 0, Point() );
#ifdef DEBUG
    imshow( "Biggest", biggest );
#endif
    waitKey(1);
}
/**
 * Creates a random-swap matrix on the basis of the original image. The
 * function generates a random swap for each pixel; if an overflow would
 * occur, another random swap is generated for that pixel.
 * @param original    Matrix containing the original image.
 * @param rand_matrix Matrix to be filled with the random swaps.
 */
void create_rand_color( Mat& rand_matrix, Mat& original )
{
    Mat copy;
    original.copyTo( copy );

    // Working variables
    Mat_<Vec2f> _II = rand_matrix;
    int channels = original.channels();
    int nRows = original.rows;
    int nCols = original.cols * channels;
    uchar* co = 0;   // row pointer into the copy of the original image
    int tmp = 0;     // value at the candidate swap position

    // Seed the random number generator with the given password
    RNG rng( hash_fun( (unsigned char*)pass ) );

    // Select a feasible random swap for every pixel
    for( int i = 0; i < rand_matrix.rows; i++ )
    {
        for( int j = 0; j < rand_matrix.cols; j++ )
        {
            bool flag = false;   // no overflow yet
            // Retry until a swap without overflow is found
            while( !flag )
            {
                // Select a random position
                _II(i,j)[0] = rng.uniform( 0, nRows );   // random row
                _II(i,j)[1] = rng.uniform( 0, nCols );   // random column and channel

                // Check that no overflow occurs at the selected point
                co = copy.ptr<uchar>( (int)_II(i,j)[0] );   // row
                tmp = co[ (int)_II(i,j)[1] ];               // column + channel
                if( tmp < 255 )   // no overflow
                {
                    flag = true;  // accept the swap and move to the next point
                }
            }
        }
    }

    // Write the selected random swaps back to the output matrix
    rand_matrix = _II;
}
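/* hash_fun is not shown in the original; a plausible stand-in (hypothetical,
   djb2-style) that turns the password bytes into a seed for cv::RNG: */
static unsigned long long hash_fun( const unsigned char* str )   // hypothetical helper
{
    unsigned long long h = 5381;
    while( *str )
        h = ((h << 5) + h) + *str++;   // h * 33 + c
    return h;
}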
// Initialization
void Initialize( CvMat* pFrameMat, RNG rng )
{
    // Randomly chosen row (r) and column (c)
    int rand, r, c;

    // Initialize the sample set of every pixel
    for( int y = 0; y < pFrameMat->rows; y++ )          // height
    {
        for( int x = 0; x < pFrameMat->cols; x++ )      // width
        {
            for( int k = 0; k < defaultNbSamples; k++ )
            {
                // Pick a random neighbour and clamp it to the image
                rand = rng.uniform( 0, 9 );
                r = y + c_yoff[rand];
                if( r < 0 ) r = 0;
                if( r >= pFrameMat->rows ) r = pFrameMat->rows - 1;   // row

                c = x + c_xoff[rand];
                if( c < 0 ) c = 0;
                if( c >= pFrameMat->cols ) c = pFrameMat->cols - 1;   // column

                // Store the sampled pixel value
                samples[y][x][k] = CV_MAT_ELEM( *pFrameMat, float, r, c );
            }
            samples[y][x][defaultNbSamples] = 0;
        }
    }
}
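/* The offset tables are not shown in the original; a plausible definition
   (hypothetical, the usual ViBe-style 3x3 neighbourhood including the
   centre), consistent with rng.uniform(0, 9) above returning 0..8: */
static const int c_xoff[9] = { -1,  0,  1, -1, 0, 1, -1, 0, 1 };
static const int c_yoff[9] = { -1, -1, -1,  0, 0, 0,  1, 1, 1 };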
specimen_t population::populate( void )
{
    specimen_t indi;
    uint64_t iter;

    for( iter = 0; iter < NUMBER_DIMENSIONS; iter++ )
    {
        indi.cc.vel[iter] = rudi_.uniform( (double)min_.vel[iter], (double)max_.vel[iter] );
        indi.cc.pos[iter] = rudi_.uniform( (double)min_.pos[iter], (double)max_.pos[iter] );
    }

    // indi.gen = discretize(indi.cc);
    discretize( indi );
    fixer( indi );

    indi.fit    = calcFitness( indi.gen );
    indi.calced = fitness_calculation_counter_;
    indi.bc     = indi.cc;
    indi.bf     = indi.fit;
    return indi;
}
int ParticleTrackingAlg::resampleParticleList()
{
    double *cumPdf = new double[particleNum];
    std::vector<Particle> newParticleList;
    std::vector<Particle>::iterator particleIter;

    // Build the cumulative distribution of the particle weights.
    double sumWeight = 0.0;
    int index = 0;
    for( particleIter = particleList.begin(); particleIter != particleList.end(); ++particleIter )
    {
        sumWeight += particleIter->GetParticleWeight();
        cumPdf[index] = sumWeight;
        index++;
    }

    // Resample with replacement, adding Gaussian position noise to each draw.
    particleIter = particleList.begin();
    RNG rng;
    for( int i = 0; i < particleNum; i++ )
    {
        double randNum = rng.uniform( 0.0, sumWeight );
        int j;
        for( j = 0; j < particleNum; j++ )
        {
            if( cumPdf[j] >= randNum )
                break;
        }
        Particle p( (particleIter+j)->GetParticleRegion(),
                    rng.gaussian( Utility::xNoise ), rng.gaussian( Utility::yNoise ) );
        p.SetParticleWeight( (particleIter+j)->GetParticleWeight() );
        newParticleList.push_back( p );
    }

    particleList = newParticleList;
    delete [] cumPdf;
    return 0;
}
int NearestNeighborTest::checkFind( const Mat& data )
{
    int code = CvTS::OK;
    int pointsCount = 1000;
    float noise = 0.2f;

    RNG rng;
    Mat points( pointsCount, dims, CV_32FC1 );
    Mat results( pointsCount, K, CV_32SC1 );

    // Each query point is a randomly chosen feature plus a little noise.
    std::vector<int> fmap( pointsCount );
    for( int pi = 0; pi < pointsCount; pi++ )
    {
        int fi = rng.next() % featuresCount;
        fmap[pi] = fi;
        for( int d = 0; d < dims; d++ )
            points.at<float>(pi, d) = data.at<float>(fi, d) + rng.uniform(0.0f, 1.0f) * noise;
    }

    code = findNeighbors( points, results );

    if( code == CvTS::OK )
    {
        int correctMatches = 0;
        for( int pi = 0; pi < pointsCount; pi++ )
        {
            if( fmap[pi] == results.at<int>(pi, 0) )
                correctMatches++;
        }

        double correctPerc = correctMatches / (double)pointsCount;
        if( correctPerc < .75 )
        {
            ts->printf( CvTS::LOG, "correct_perc = %f\n", correctPerc );
            code = CvTS::FAIL_BAD_ACCURACY;
        }
    }
    return code;
}
bool Core_EigenTest::check_full( int type )
{
    const int MAX_DEGREE = 7;

    RNG rng = ::theRNG();   // fix the seed

    for( int i = 0; i < ntests; ++i )
    {
        // Random square size: a power of two between 2 and 2^MAX_DEGREE
        int src_size = (int)(std::pow(2.0, (rng.uniform(0, MAX_DEGREE) + 1.)));

        cv::Mat src( src_size, src_size, type );

        // Fill a symmetric matrix: mirror each random value across the diagonal
        for( int j = 0; j < src.rows; ++j )
            for( int k = j; k < src.cols; ++k )
                if( type == CV_32FC1 )
                    src.at<float>(k, j) = src.at<float>(j, k) = cv::randu<float>();
                else
                    src.at<double>(k, j) = src.at<double>(j, k) = cv::randu<double>();

        if( !test_values(src) )
            return false;
    }
    return true;
}
virtual bool runTest( RNG& rng, int mode, int method, const vector<Point3f>& points,
                      const double* epsilon, double& maxError )
{
    Mat rvec, tvec;
    vector<int> inliers;
    Mat trueRvec, trueTvec;
    Mat intrinsics, distCoeffs;

    generateCameraMatrix( intrinsics, rng );
    if( method == 4 )   // method 4 (UPnP) assumes fx == fy
        intrinsics.at<double>(1,1) = intrinsics.at<double>(0,0);
    if( mode == 0 )
        distCoeffs = Mat::zeros(4, 1, CV_64FC1);
    else
        generateDistCoeffs( distCoeffs, rng );
    generatePose( trueRvec, trueTvec, rng );

    vector<Point2f> projectedPoints;
    projectedPoints.resize( points.size() );
    projectPoints( Mat(points), trueRvec, trueTvec, intrinsics, distCoeffs, projectedPoints );

    // Corrupt every 20th projection with another point's coordinates (~5% outliers)
    for( size_t i = 0; i < projectedPoints.size(); i++ )
    {
        if( i % 20 == 0 )
        {
            projectedPoints[i] = projectedPoints[rng.uniform(0, (int)points.size()-1)];
        }
    }

    solvePnPRansac( points, projectedPoints, intrinsics, distCoeffs, rvec, tvec,
                    false, 500, 0.5f, 0.99, inliers, method );

    bool isTestSuccess = inliers.size() >= points.size()*0.95;

    double rvecDiff = norm(rvec - trueRvec), tvecDiff = norm(tvec - trueTvec);
    isTestSuccess = isTestSuccess && rvecDiff < epsilon[method] && tvecDiff < epsilon[method];

    double error = rvecDiff > tvecDiff ? rvecDiff : tvecDiff;
    //cout << error << " " << inliers.size() << " " << eps[method] << endl;
    if( error > maxError )
        maxError = error;

    return isTestSuccess;
}
/**
 * @function thresh_callback
 */
void thresh_callback(int, void* )
{
    Mat canny_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    /// Detect edges using Canny
    Canny( src_gray, canny_output, thresh, thresh*2, 3 );

    /// Find contours
    findContours( canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) );

    /// Get the moments
    vector<Moments> mu( contours.size() );
    for( size_t i = 0; i < contours.size(); i++ )
        { mu[i] = moments( contours[i], false ); }

    /// Get the mass centers (note: m00 is zero for degenerate contours,
    /// which would make these centers NaN)
    vector<Point2f> mc( contours.size() );
    for( size_t i = 0; i < contours.size(); i++ )
        { mc[i] = Point2f( static_cast<float>(mu[i].m10/mu[i].m00),
                           static_cast<float>(mu[i].m01/mu[i].m00) ); }

    /// Draw contours
    Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        drawContours( drawing, contours, (int)i, color, 2, 8, hierarchy, 0, Point() );
        circle( drawing, mc[i], 4, color, -1, 8, 0 );
    }

    /// Show in a window
    namedWindow( "Contours", WINDOW_AUTOSIZE );
    imshow( "Contours", drawing );

    /// Calculate the area with moment 00 and compare with the result of the OpenCV function
    printf("\t Info: Area and Contour Length \n");
    for( size_t i = 0; i < contours.size(); i++ )
    {
        printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n",
               (int)i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        drawContours( drawing, contours, (int)i, color, 2, 8, hierarchy, 0, Point() );
        circle( drawing, mc[i], 4, color, -1, 8, 0 );
    }
}
void generateDistCoeffs( Mat& distCoeffs, RNG& rng )
{
    distCoeffs = Mat::zeros(4, 1, CV_64FC1);
    // Only the first three coefficients are perturbed; the fourth stays zero.
    for( int i = 0; i < 3; i++ )
        distCoeffs.at<double>(i,0) = rng.uniform( 0.0, 1.0e-6 );
}
int main( int argc, const char** argv )
{
    help( argv );
    if( argc < 2 )
    {
        cout << "\nERROR: You had too few parameters.\n" << endl;
        return -1;
    }

    Mat src;
    Mat gray;
    Mat mask;
    Mat temp;
    Mat temp2;

    /************************************************************************/
    /* 1. Load an image with interesting textures. Smooth the image in several
          ways using cv::smooth() with smoothtype=cv::GAUSSIAN.
       a. Use a symmetric 3 × 3, 5 × 5, 9 × 9, and 11 × 11 smoothing window
          size and display the results.
       b. Are the output results nearly the same by smoothing the image twice
          with a 5 × 5 Gaussian filter as when you smooth once with two
          11 × 11 filters? Why or why not?                                   */
    /************************************************************************/
    src = imread( argv[1] );
    if( src.empty() )
    {
        cout << "\nERROR: parameter is not an image name.\n" << endl;
        return -1;
    }

    double minPixelValue, maxPixelValue;

    // a
    Mat smooth33; Mat smooth55; Mat smooth99; Mat smooth111;
    GaussianBlur( src, smooth33,  cv::Size(3, 3),   0 );
    GaussianBlur( src, smooth55,  cv::Size(5, 5),   0 );
    GaussianBlur( src, smooth99,  cv::Size(9, 9),   0 );
    GaussianBlur( src, smooth111, cv::Size(11, 11), 0 );

    // b
    GaussianBlur( smooth55, smooth55, cv::Size(5, 5), 0 );
    temp = smooth55 - smooth111;
    cv::minMaxIdx( temp, &minPixelValue, &maxPixelValue );
    // maxPixelValue = 19: smoothing twice with a 5 × 5 Gaussian is close to,
    // but not identical to, a single 11 × 11 filter

    /************************************************************************/
    /* 2. Create a 100 × 100 single-channel image. Set all pixels to 0.
          Finally, set the center pixel equal to 255.
       a. Smooth this image with a 5 × 5 Gaussian filter and display the
          results. What did you find?
       b. Do this again but with a 9 × 9 Gaussian filter.
       c. What does it look like if you start over and smooth the image twice
          with the 5 × 5 filter? Compare this with the 9 × 9 results. Are
          they nearly the same? Why or why not?                              */
    /************************************************************************/
    Mat singleChanel100 = Mat( 100, 100, CV_8U, Scalar(0) );
    singleChanel100.at<uchar>(50, 50) = 255;

    // a
    GaussianBlur( singleChanel100, temp, cv::Size(5, 5), 0 );
    imshow( "5 × 5 Gaussian filter", temp );
    // b
    GaussianBlur( singleChanel100, temp, cv::Size(9, 9), 0 );
    imshow( "9 × 9 Gaussian filter", temp );
    // c
    GaussianBlur( singleChanel100, temp, cv::Size(5, 5), 0 );
    GaussianBlur( temp, temp, cv::Size(5, 5), 0 );
    GaussianBlur( singleChanel100, temp2, cv::Size(9, 9), 0 );
    absdiff( temp, temp2, temp2 );
    cv::minMaxIdx( temp2, &minPixelValue, &maxPixelValue );
    // maxPixelValue = 5: the results are nearly the same, since cascading two
    // Gaussians is equivalent to one wider Gaussian

    /************************************************************************/
    /* 10. Create a low-variance random image (use a random number call such
           that the numbers don't differ by much more than three and most
           numbers are near zero). Load the image into a drawing program such
           as PowerPoint, and then draw a wheel of lines meeting at a single
           point. Use bilateral filtering on the resulting image and explain
           the results.
                                                                             */
    /************************************************************************/
    Mat matLowVariance = Mat( 512, 512, CV_8U, Scalar(0) );
    RNG arng = cv::theRNG();
    arng.fill( matLowVariance, RNG::UNIFORM, 0, 30 );

    // Draw a wheel of lines meeting at the center
    line( matLowVariance, Point(256, 256), Point(256,     256-100), Scalar(255), 1 );
    line( matLowVariance, Point(256, 256), Point(256+100, 256+100), Scalar(255), 1 );
    line( matLowVariance, Point(256, 256), Point(256+100, 256),     Scalar(255), 1 );
    line( matLowVariance, Point(256, 256), Point(256+100, 256-100), Scalar(255), 1 );
    line( matLowVariance, Point(256, 256), Point(256,     256+100), Scalar(255), 1 );
    line( matLowVariance, Point(256, 256), Point(256-100, 256-100), Scalar(255), 1 );
    line( matLowVariance, Point(256, 256), Point(256-100, 256),     Scalar(255), 1 );
    line( matLowVariance, Point(256, 256), Point(256-100, 256+100), Scalar(255), 1 );
    imshow( "a wheel of lines meeting at a single point", matLowVariance );

    // The bilateral filter smooths the low-variance noise while the
    // high-contrast spokes survive as edges
    bilateralFilter( matLowVariance, temp, 5, 10.0, 2.0 );
    imshow( "bilateralFilter", temp );

    /************************************************************************/
    /* 11. Load an image of a scene and convert it to grayscale.
       a. Run the morphological Top Hat operation on your image and display
          the results.
       b. Convert the resulting image into an 8-bit mask.
       c. Copy a grayscale value into the original image where the Top Hat
          mask (from part b of this exercise) is nonzero. Display the
          results.                                                           */
    /************************************************************************/
    cvtColor( src, gray, COLOR_BGR2GRAY );
    // a
    morphologyEx( gray, temp, CV_MOP_TOPHAT, Mat() );
    imshow( "morphological Top Hat", temp );
    // b
    temp.convertTo( mask, CV_8UC1 );
    // c
    cvtColor( gray, gray, COLOR_GRAY2BGR );
    gray.copyTo( src, mask );
    imshow( "exercise 11 result", src );

    /************************************************************************/
    /* 12. Load an image with many details.
       a. Use resize() to reduce the image by a factor of 2 in each dimension
          (hence the image will be reduced by a factor of 4). Do this three
          times and display the results.
       b. Now take the original image and use cv::pyrDown() to reduce it
          three times, and then display the results.
       c. How are the two results different? Why are the approaches
          different?                                                         */
    /************************************************************************/
    // a
    Mat matResize;
    resize( src, matResize, cv::Size(0, 0), 0.5, 0.5 );
    resize( matResize, matResize, cv::Size(0, 0), 0.5, 0.5 );
    resize( matResize, matResize, cv::Size(0, 0), 0.5, 0.5 );
    imshow( "resize 3 times", matResize );
    // b
    Mat matPyrDown;
    pyrDown( src, matPyrDown );
    pyrDown( matPyrDown, matPyrDown );
    pyrDown( matPyrDown, matPyrDown );
    imshow( "pyrDown 3 times", matPyrDown );
    // c (note: for odd source dimensions the two sizes can differ by a pixel)
    absdiff( matResize, matPyrDown, temp );
    imshow( "difference between resize and pyrDown results", temp );

    /************************************************************************/
    /* 15. Use cv::filter2D() to create a filter that detects only 60-degree
           lines in an image. Display the results on a sufficiently
           interesting image scene.
                                                                             */
    /************************************************************************/
    Mat matWithLines = Mat( 512, 512, CV_8UC1, Scalar(0) );
    // Create 9 random lines
    for( int i = 0; i < 9; i++ )
    {
        line( matWithLines,
              Point(arng.uniform(0, 512), arng.uniform(0, 512)),
              Point(arng.uniform(0, 512), arng.uniform(0, 512)),
              Scalar(255), 1 );
    }
    // A 45-degree line: a 3 × 3 grid cannot represent a 60-degree line
    // exactly, so this solution detects the 45-degree diagonal instead
    line( matWithLines, Point(0, 512), Point(512, 0), Scalar(255), 1 );
    matWithLines.convertTo( matWithLines, CV_32FC1, 1.0/255 );

    // Anti-diagonal kernel: responds only to 45-degree lines
    Mat matKernel = Mat( 3, 3, CV_32FC1, Scalar(0) );
    matKernel.at<float>(0,0) = 0;       matKernel.at<float>(0,1) = 0;       matKernel.at<float>(0,2) = 1.0f/3;
    matKernel.at<float>(1,0) = 0;       matKernel.at<float>(1,1) = 1.0f/3;  matKernel.at<float>(1,2) = 0;
    matKernel.at<float>(2,0) = 1.0f/3;  matKernel.at<float>(2,1) = 0;       matKernel.at<float>(2,2) = 0;
    filter2D( matWithLines, temp, CV_32FC1, matKernel );
    threshold( temp, temp, 0.99, 1, CV_THRESH_BINARY );

    /************************************************************************/
    /* 16. Separable kernels: create a 3 × 3 Gaussian kernel using rows
           [(1/16, 2/16, 1/16), (2/16, 4/16, 2/16), (1/16, 2/16, 1/16)] and
           with the anchor point in the middle.
       a. Run this kernel on an image and display the results.
       b. Now create two one-dimensional kernels with anchors in the center:
          one going "across" (1/4, 2/4, 1/4), and one going down (1/4, 2/4,
          1/4). Load the same original image and use cv::filter2D() to
          convolve the image twice, once with the first 1D kernel and once
          with the second 1D kernel. Describe the results.
       c. Describe the order of complexity (number of operations) for the
          kernel in part a and for the kernels in part b. The difference is
          the advantage of being able to use separable kernels and the
          entire Gaussian class of filters, or any linearly decomposable
          filter that is separable, since convolution is a linear
          operation.                                                         */
    /************************************************************************/
    Mat matGaussianKernel = Mat( 3, 3, CV_32FC1, Scalar(0) );
    matGaussianKernel.at<float>(0,0) = 1.0f/16; matGaussianKernel.at<float>(0,1) = 2.0f/16; matGaussianKernel.at<float>(0,2) = 1.0f/16;
    matGaussianKernel.at<float>(1,0) = 2.0f/16; matGaussianKernel.at<float>(1,1) = 4.0f/16; matGaussianKernel.at<float>(1,2) = 2.0f/16;
    matGaussianKernel.at<float>(2,0) = 1.0f/16; matGaussianKernel.at<float>(2,1) = 2.0f/16; matGaussianKernel.at<float>(2,2) = 1.0f/16;

    // a
    src.convertTo( temp, CV_32F, 1.0/255 );
    filter2D( temp, temp, CV_32F, matGaussianKernel );
    imshow( "a 3 × 3 Gaussian kernel", temp );

    // b: apply the two 1D kernels to the same original image as in part (a)
    Mat matKernel1 = Mat( 1, 3, CV_32FC1, Scalar(0) );
    Mat matKernel2 = Mat( 3, 1, CV_32FC1, Scalar(0) );
    matKernel1.at<float>(0,0) = 1.0f/4; matKernel1.at<float>(0,1) = 2.0f/4; matKernel1.at<float>(0,2) = 1.0f/4;
    matKernel2.at<float>(0,0) = 1.0f/4; matKernel2.at<float>(1,0) = 2.0f/4; matKernel2.at<float>(2,0) = 1.0f/4;
    Mat orig32;
    src.convertTo( orig32, CV_32F, 1.0/255 );
    filter2D( orig32, temp2, CV_32F, matKernel1 );
    filter2D( temp2, temp2, CV_32F, matKernel2 );
    absdiff( temp, temp2, temp2 );
    // temp and temp2 are essentially the same image: maxPixelValue is very
    // small, nearly zero
    cv::minMaxIdx( temp2, &minPixelValue, &maxPixelValue );

    // c: the full k × k kernel costs k^2 multiply-adds per pixel (9 for
    // 3 × 3), while the two separable 1D passes cost 2k (6 for 3 × 3); the
    // gap widens quickly for larger kernels

    waitKey();
    return 0;
}
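/* Side note, not from the original source: OpenCV exposes the separable
   convolution of exercise 16b directly; a sketch with the same 1D taps: */
void separableGaussianDemo( const Mat& src32f )
{
    Mat kernel1d = ( Mat_<float>(1, 3) << 0.25f, 0.5f, 0.25f );
    Mat dst;
    // Applies the row kernel then the column kernel in one call,
    // equivalent to the two filter2D() passes above.
    sepFilter2D( src32f, dst, CV_32F, kernel1d, kernel1d.t() );
    imshow( "sepFilter2D result", dst );
}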