void computehist(IplImage* image, CvRect roi, cv::MatND* hist, int hbins = 40, int sbins = 40) { IplImage* temp = crop( image, roi); //cv::namedWindow( "roi debug", 1 ); //cv::Mat test(image); //cv::imshow("roi debug",test); cv::Mat src(temp); cv::Mat hsv; cvtColor(src, hsv, CV_BGR2HSV); // quantize the hue to hbins levels and the saturation to sbins levels (40 each by default) int currHistsize[] = {hbins, sbins}; // hue varies from 0 to 179, see cvtColor float hranges[] = { 0, 180 }; // saturation varies from 0 (black-gray-white) to 255 (pure spectrum color) float sranges[] = { 0, 256 }; const float* ranges[] = { hranges, sranges }; //cv::MatND localhist; // we compute the histogram from the 0-th and 1-st channels int channels[] = {0, 1}; calcHist( &hsv, 1, channels, cv::Mat(), // do not use mask *hist, 2, currHistsize, ranges, true, // the histogram is uniform false ); //drawHist(*hist); }
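// Usage sketch (not from the original source): computing and comparing the H-S histograms of
// two regions of interest with computehist(). Assumes crop() and computehist() as defined
// above and an already-loaded BGR IplImage*; the rectangles below are placeholders.
void compareRegions(IplImage* frame) {
    cv::MatND histA, histB;
    computehist(frame, cvRect(0, 0, 100, 100), &histA);    // first ROI
    computehist(frame, cvRect(120, 0, 100, 100), &histB);  // second ROI
    cv::normalize(histA, histA, 1.0, 0.0, cv::NORM_L1);    // make the histograms comparable
    cv::normalize(histB, histB, 1.0, 0.0, cv::NORM_L1);
    double similarity = cv::compareHist(histA, histB, CV_COMP_CORREL);
    printf("H-S histogram correlation: %f\n", similarity);
}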
void MainWindow::on_actionHistogram_equalization_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } DialogHistogramEqualization dialog; if (dialog.exec() == QDialog::Rejected) return; int equalizationType = dialog.getEqualizationType(); if (equalizationType == 1) RGB_equalization(image); else if (equalizationType == 2) V_equalization(image); else if (equalizationType == 3) Grayscale_equalization(image); pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
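// Sketch only: the equalization helpers called above (RGB_equalization, V_equalization,
// Grayscale_equalization) are not shown in this file. A minimal in-place grayscale
// equalization over a QImage could look roughly like this; treat it as an illustrative
// guess, not the project's actual implementation.
void Grayscale_equalization(QImage &image)
{
    int width = image.width();
    int height = image.height();
    int total = width * height;
    std::vector<int> hist(256, 0);
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x)
            ++hist[ qGray(image.pixel(x, y)) ];
    // build the cumulative distribution and map it onto [0, 255]
    std::vector<int> lut(256, 0);
    int cumulative = 0;
    for (int i = 0; i < 256; ++i)
    {
        cumulative += hist[i];
        lut[i] = qBound(0, (int)(255.0 * cumulative / total), 255);
    }
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x)
        {
            int g = lut[ qGray(image.pixel(x, y)) ];
            image.setPixel(x, y, qRgb(g, g, g));
        }
}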
void MeanShiftDemo( VideoCapture& video, Rect& starting_position, int starting_frame_number, int end_frame) { bool half_size = true; video.set(CV_CAP_PROP_POS_FRAMES,starting_frame_number); Mat current_frame, hls_image; std::vector<cv::Mat> hls_planes(3); video >> current_frame; Rect current_position(starting_position); if (half_size) { resize(current_frame, current_frame, Size( current_frame.cols/2, current_frame.rows/2 )); current_position.height /= 2; current_position.width /= 2; current_position.x /= 2; current_position.y /= 2; } cvtColor(current_frame, hls_image, CV_BGR2HLS); split(hls_image,hls_planes); int chosen_channel = 0; // Hue channel Mat image1ROI = hls_planes[chosen_channel](current_position); float channel_range[2] = { 0.0, 255.0 }; int channel_numbers[1] = { 0 }; int number_bins[1] = { 32 }; MatND histogram[1]; const float* channel_ranges = channel_range; calcHist(&(image1ROI), 1, channel_numbers, Mat(), histogram[0], 1 , number_bins, &channel_ranges); normalize(histogram[0],histogram[0],1.0); rectangle(current_frame,current_position,Scalar(0,255,0),2); Mat starting_frame = current_frame.clone(); int frame_number = starting_frame_number; while (!current_frame.empty() && (frame_number < end_frame)) { // Calculate back projection Mat back_projection_probabilities; calcBackProject(&(hls_planes[chosen_channel]),1,channel_numbers,*histogram,back_projection_probabilities,&channel_ranges,255.0); // Remove low saturation points from consideration Mat saturation_mask; inRange( hls_image, Scalar(0,10,50,0),Scalar(180,256,256,0), saturation_mask ); bitwise_and( back_projection_probabilities, back_projection_probabilities,back_projection_probabilities, saturation_mask ); // Mean shift TermCriteria criteria(cv::TermCriteria::MAX_ITER,5,0.01); meanShift(back_projection_probabilities,current_position,criteria); // Output to screen rectangle(current_frame,current_position,Scalar(0,255,0),2); Mat chosen_channel_image, back_projection_image; cvtColor(hls_planes[chosen_channel], chosen_channel_image, CV_GRAY2BGR); cvtColor(back_projection_probabilities, back_projection_image, CV_GRAY2BGR); Mat row1_output = JoinImagesHorizontally( starting_frame, "Starting position", chosen_channel_image, "Chosen channel (Hue)", 4 ); Mat row2_output = JoinImagesHorizontally( back_projection_image, "Back projection", current_frame, "Current position", 4 ); Mat mean_shift_output = JoinImagesVertically(row1_output,"",row2_output,"", 4); imshow("Mean Shift Tracking", mean_shift_output ); // Advance to next frame video >> current_frame; if (half_size) resize(current_frame, current_frame, Size( current_frame.cols/2, current_frame.rows/2 )); cvtColor(current_frame, hls_image, CV_BGR2HLS); split(hls_image,hls_planes); frame_number++; cvWaitKey(1000); } char c = cvWaitKey(); cvDestroyAllWindows(); }
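// Usage sketch (illustrative, not from the original source): MeanShiftDemo() expects an
// opened VideoCapture, an initial window around the target in the first processed frame,
// and a frame range. The file name and rectangle below are placeholders.
int run_mean_shift_demo()
{
    VideoCapture video("surveillance.avi");
    if (!video.isOpened())
        return -1;
    Rect initial_window(300, 200, 60, 120);   // hand-picked box around the target
    MeanShiftDemo(video, initial_window, /*starting_frame_number=*/0, /*end_frame=*/200);
    return 0;
}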
double cv::calcGlobalOrientation( InputArray _orientation, InputArray _mask, InputArray _mhi, double /*timestamp*/, double duration ) { Mat orient = _orientation.getMat(), mask = _mask.getMat(), mhi = _mhi.getMat(); Size size = mhi.size(); CV_Assert( mask.type() == CV_8U && orient.type() == CV_32F && mhi.type() == CV_32F ); CV_Assert( mask.size() == size && orient.size() == size ); CV_Assert( duration > 0 ); int histSize = 12; float _ranges[] = { 0.f, 360.f }; const float* ranges = _ranges; Mat hist; calcHist(&orient, 1, 0, mask, hist, 1, &histSize, &ranges); // find the maximum index (the dominant orientation) Point baseOrientPt; minMaxLoc(hist, 0, 0, 0, &baseOrientPt); float fbaseOrient = (baseOrientPt.x + baseOrientPt.y)*360.f/histSize; // override timestamp with the maximum value in MHI double timestamp = 0; minMaxLoc( mhi, 0, &timestamp, 0, 0, mask ); // find the shift relative to the dominant orientation as weighted sum of relative angles float a = (float)(254. / 255. / duration); float b = (float)(1. - timestamp * a); float delbound = (float)(timestamp - duration); if( mhi.isContinuous() && mask.isContinuous() && orient.isContinuous() ) { size.width *= size.height; size.height = 1; } /* a = 254/(255*dt) b = 1 - t*a = 1 - 254*t/(255*dur) = (255*dt - 254*t)/(255*dt) = (dt - (t - dt)*254)/(255*dt); -------------------------------------------------------- ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) = (254*x + dt - (t - dt)*254)/(255*dt) = ((x - (t - dt))*254 + dt)/(255*dt) = (((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255 */ float shiftOrient = 0, shiftWeight = 0; for( int y = 0; y < size.height; y++ ) { const float* mhiptr = mhi.ptr<float>(y); const float* oriptr = orient.ptr<float>(y); const uchar* maskptr = mask.ptr<uchar>(y); for( int x = 0; x < size.width; x++ ) { if( maskptr[x] != 0 && mhiptr[x] > delbound ) { /* orient in 0..360, base_orient in 0..360 -> (rel_angle = orient - base_orient) in -360..360. rel_angle is translated to -180..180 */ float weight = mhiptr[x] * a + b; float relAngle = oriptr[x] - fbaseOrient; relAngle += (relAngle < -180 ? 360 : 0); relAngle += (relAngle > 180 ? -360 : 0); if( fabs(relAngle) < 45 ) { shiftOrient += weight * relAngle; shiftWeight += weight; } } } } // add the dominant orientation and the relative shift if( shiftWeight == 0 ) shiftWeight = 0.01f; fbaseOrient += shiftOrient / shiftWeight; fbaseOrient -= (fbaseOrient < 360 ? 0 : 360); fbaseOrient += (fbaseOrient >= 0 ? 0 : 360); return fbaseOrient; }
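// Context sketch (an assumption, not part of this file): calcGlobalOrientation() is normally
// fed by OpenCV's motion-template pipeline. One update step might look like this, assuming
// the 2.x-style API (updateMotionHistory / calcMotionGradient), a CV_8UC1 silhouette of the
// moving pixels, and a preallocated CV_32FC1 motion-history image of the same size.
void global_motion_step(const Mat &silhouette, Mat &mhi, double timestamp_sec)
{
    const double MHI_DURATION = 1.0;     // how long motion is remembered, in seconds
    const double MAX_TIME_DELTA = 0.5;   // gradient validity thresholds for calcMotionGradient
    const double MIN_TIME_DELTA = 0.05;
    updateMotionHistory(silhouette, mhi, timestamp_sec, MHI_DURATION);
    Mat mask, orientation;
    calcMotionGradient(mhi, mask, orientation, MIN_TIME_DELTA, MAX_TIME_DELTA, 3);
    double angle = calcGlobalOrientation(orientation, mask, mhi, timestamp_sec, MHI_DURATION);
    printf("dominant motion direction: %.1f degrees\n", angle);
}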
void FindObjectMain::process_camshift() { // Some user defined parameters int vmin = config.vmin; int vmax = config.vmax; int smin = config.smin; float hranges[] = { 0, 180 }; const float* phranges = hranges; // Create aligned, RGB images if(!object_image) { object_image = cvCreateImage( cvSize(object_image_w, object_image_h), 8, 3); } if(!scene_image) { scene_image = cvCreateImage( cvSize(scene_image_w, scene_image_h), 8, 3); } // Temporary row pointers unsigned char **object_rows = new unsigned char*[object_image_h]; unsigned char **scene_rows = new unsigned char*[scene_image_h]; for(int i = 0; i < object_image_h; i++) { object_rows[i] = (unsigned char*)(object_image->imageData + i * object_image_w * 3); } for(int i = 0; i < scene_image_h; i++) { scene_rows[i] = (unsigned char*)(scene_image->imageData + i * scene_image_w * 3); } // Transfer object & scene to RGB images for OpenCV if(!prev_object) prev_object = new unsigned char[object_image_w * object_image_h * 3]; // Back up old object image memcpy(prev_object, object_image->imageData, object_image_w * object_image_h * 3); BC_CModels::transfer(object_rows, get_input(object_layer)->get_rows(), 0, 0, 0, 0, 0, 0, object_x1, object_y1, object_w, object_h, 0, 0, object_w, object_h, get_input(object_layer)->get_color_model(), BC_RGB888, 0, 0, 0); BC_CModels::transfer(scene_rows, get_input(scene_layer)->get_rows(), 0, 0, 0, 0, 0, 0, scene_x1, scene_y1, scene_w, scene_h, 0, 0, scene_w, scene_h, get_input(scene_layer)->get_color_model(), BC_RGB888, 0, 0, 0); delete [] object_rows; delete [] scene_rows; // from camshiftdemo.cpp // Compute new object if(memcmp(prev_object, object_image->imageData, object_image_w * object_image_h * 3) || !hist.dims) { Mat image(object_image); Mat hsv, hue, mask; cvtColor(image, hsv, CV_RGB2HSV); int _vmin = vmin, _vmax = vmax; //printf("FindObjectMain::process_camshift %d\n", __LINE__); inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)), Scalar(180, 256, MAX(_vmin, _vmax)), mask); int ch[] = { 0, 0 }; hue.create(hsv.size(), hsv.depth()); mixChannels(&hsv, 1, &hue, 1, ch, 1); Rect selection = Rect(0, 0, object_w, object_h); trackWindow = selection; int hsize = 16; Mat roi(hue, selection), maskroi(mask, selection); calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges); normalize(hist, hist, 0, 255, CV_MINMAX); } // compute scene Mat image(scene_image); Mat hsv, hue, mask, backproj; cvtColor(image, hsv, CV_RGB2HSV); int _vmin = vmin, _vmax = vmax; inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)), Scalar(180, 256, MAX(_vmin, _vmax)), mask); int ch[] = {0, 0}; hue.create(hsv.size(), hsv.depth()); mixChannels(&hsv, 1, &hue, 1, ch, 1); //printf("FindObjectMain::process_camshift %d %d %d\n", __LINE__, hist.dims, hist.size[1]); RotatedRect trackBox = RotatedRect( Point2f((object_x1 + object_x2) / 2, (object_y1 + object_y2) / 2), Size2f(object_w, object_h), 0); trackWindow = Rect(0, 0, scene_w, scene_h); if(hist.dims > 0) { calcBackProject(&hue, 1, 0, hist, backproj, &phranges); backproj &= mask; //printf("FindObjectMain::process_camshift %d\n", __LINE__); // if(trackWindow.width <= 0 || // trackWindow.height <= 0) // { // trackWindow.width = object_w; // trackWindow.height = object_h; // } trackBox = CamShift(backproj, trackWindow, TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 )); //printf("FindObjectMain::process_camshift %d\n", __LINE__); // if( trackWindow.area() <= 1 ) // { // int cols = backproj.cols; // int rows = backproj.rows; // int r = (MIN(cols, rows) + 5) / 6; // trackWindow = Rect(trackWindow.x - r, 
trackWindow.y - r, // trackWindow.x + r, trackWindow.y + r) & // Rect(0, 0, cols, rows); // } } // printf("FindObjectMain::process_camshift %d %d %d %d %d\n", // __LINE__, // trackWindow.x, // trackWindow.y, // trackWindow.width, // trackWindow.height); // Draw mask over scene if(config.draw_keypoints) { for(int i = 0; i < scene_h; i++) { switch(get_input(scene_layer)->get_color_model()) { case BC_YUV888: { unsigned char *input = backproj.data + i * scene_image_w; unsigned char *output = get_input(scene_layer)->get_rows()[i + scene_y1] + scene_x1 * 3; for(int j = 0; j < scene_w; j++) { output[0] = *input; output[1] = 0x80; output[2] = 0x80; output += 3; input++; } break; } } } } // Get object outline in the scene layer // printf("FindObjectMain::process_camshift %d %d %d %d %d %d\n", // __LINE__, // (int)trackBox.center.x, // (int)trackBox.center.y, // (int)trackBox.size.width, // (int)trackBox.size.height, // (int)trackBox.angle); double angle = trackBox.angle * 2 * M_PI / 360; double angle1 = atan2(-(double)trackBox.size.height / 2, -(double)trackBox.size.width / 2) + angle; double angle2 = atan2(-(double)trackBox.size.height / 2, (double)trackBox.size.width / 2) + angle; double angle3 = atan2((double)trackBox.size.height / 2, (double)trackBox.size.width / 2) + angle; double angle4 = atan2((double)trackBox.size.height / 2, -(double)trackBox.size.width / 2) + angle; double radius = sqrt(SQR(trackBox.size.height / 2) + SQR(trackBox.size.width / 2)); border_x1 = (int)(trackBox.center.x + cos(angle1) * radius) + scene_x1; border_y1 = (int)(trackBox.center.y + sin(angle1) * radius) + scene_y1; border_x2 = (int)(trackBox.center.x + cos(angle2) * radius) + scene_x1; border_y2 = (int)(trackBox.center.y + sin(angle2) * radius) + scene_y1; border_x3 = (int)(trackBox.center.x + cos(angle3) * radius) + scene_x1; border_y3 = (int)(trackBox.center.y + sin(angle3) * radius) + scene_y1; border_x4 = (int)(trackBox.center.x + cos(angle4) * radius) + scene_x1; border_y4 = (int)(trackBox.center.y + sin(angle4) * radius) + scene_y1; }
// main function in class ColorGroup // analyze and classify images void ColorGroup::run(){ int i; // colors = (string*)malloc(sizeof(string)*num); // temporary buffer (additional function that changes the size of buffer is needed) string colors[100]; // make the folder to save the result makecolorfolder(); // for each image, for(i=0; i<num; i++){ Mat image, hsv_image; image = images[i]; // extract color element of the image // if the images doesn't have 3 channels, it's classified to 'etc' if (image.channels() == 3){ // change the mode of image from RGB to HSV cvtColor(image, hsv_image, CV_BGR2HSV); // Separate the image in 3 places ( H, S, V ) vector<Mat> hsv_planes; split( hsv_image, hsv_planes ); // make histogram with color element ('h' in hsv means 'hue') int hHistSize = 180; float hRange[] = {0, 180}, vRange[] = {0, 100}; const float* hHistRange = { hRange }; Mat h_hist; calcHist( &hsv_planes[0], 1, 0, Mat(), h_hist, 1, &hHistSize, &hHistRange, true, false); int hHist_w = 360; int hHist_h = 400; int hbin_w = cvRound( (double) hHist_w/hHistSize); Mat hHistImage( hHist_h, hHist_w, CV_8UC3, Scalar( 0, 0, 0) ); normalize(h_hist, h_hist, 0, hHistImage.rows, NORM_MINMAX, -1, Mat() ); // with histogram of color, get the most used color colors[i] = mostUsedColor(h_hist, hHistSize); cout << "most used color is " << colors[i] << endl; } else { colors[i] = "etc"; } } // with the color array result, classify and save images in appropriate folder if( flag == IS_FROM_FILES ){ int i=0; string filename; // open "filenames.txt" and get the name of file ifstream file; file.open("filenames.txt"); // save the images in appropriate folder which is the color array result while(!file.eof() && i<num ){ getline(file, filename); char newfile[FILE_NAME_MAX+20] = "./color_result/"; strcat(newfile, colors[i].c_str()); strcat(newfile, "/"); strcat(newfile, filename.c_str()); cout << "new file adress : " << newfile << endl; imwrite(newfile, images[i]); i++; } }else if( flag == IS_FROM_URLS ){ int i; // make the file name to save. It counts from 0 in increasing order // and save the images in appropriate folder which is the color array result const char file[10] = "photo_"; for( i=0; i<num; i++){ char newfile[FILE_NAME_MAX+20] = "./color_result/"; strcat(newfile, colors[i].c_str()); strcat(newfile, "/"); strcat(newfile, file); if( i < 10 ){ strcat(newfile, "00"); strcat(newfile, intToString(i).c_str()); }else if ( i >= 10 && i < 100 ){ strcat(newfile, "0"); strcat(newfile, intToString(i).c_str()); }else if ( i >= 100 && i < 1000 ){ strcat(newfile, intToString(i).c_str()); } cout << newfile << endl; strcat(newfile, ".jpg"); imwrite(newfile, images[i]); } } cout << "color grouping finished" << endl; }
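// Sketch only: mostUsedColor() is not shown in this file. One plausible implementation,
// assuming it maps the peak bin of the 180-bin hue histogram to a coarse color name; the
// bin boundaries below are illustrative guesses, not the project's actual values.
string mostUsedColor(const Mat &h_hist, int hHistSize)
{
    // find the hue bin with the largest count
    Point max_loc;
    minMaxLoc(h_hist, 0, 0, 0, &max_loc);
    int hue = max_loc.y * 180 / hHistSize;   // histogram rows correspond to hue values 0..179
    if (hue < 10 || hue >= 170) return "red";
    if (hue < 25)  return "orange";
    if (hue < 35)  return "yellow";
    if (hue < 85)  return "green";
    if (hue < 130) return "blue";
    return "purple";
}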
CvPoint2D32f getPupilCenter(Mat &eye_box){ //find x and y gradients Mat gradientX = computeGradient(eye_box); Mat gradientY = computeGradient(eye_box.t()).t(); //normalize and threshold the gradient Mat mags = matrixMagnitude(gradientX, gradientY); //create a blurred and inverted image for weighting Mat weight; bitwise_not(eye_box, weight); blur(weight, weight, Size(2,2)); //weight the magnitudes, convert to 8-bit for thresholding weight.convertTo(weight, CV_32F); mags = mags.mul(weight); normalize(mags, mags, 0, 1, NORM_MINMAX, CV_32F); mags.convertTo(mags, CV_8UC1, 255); //threshold using Otsu's method threshold(mags, mags, 0, 255, THRESH_BINARY | THRESH_OTSU); //convert to CV_32S and filter gradients mags.convertTo(mags, CV_32S); gradientY = gradientY.mul(mags); gradientX = gradientX.mul(mags); //resize arrays to same size resize(gradientX, gradientX, Size(EYE_FRAME_SIZE, EYE_FRAME_SIZE), 0, 0, INTER_NEAREST); resize(gradientY, gradientY, Size(EYE_FRAME_SIZE, EYE_FRAME_SIZE), 0, 0, INTER_NEAREST); resize(weight, weight, Size(EYE_FRAME_SIZE, EYE_FRAME_SIZE), 0, 0, INTER_NEAREST); //imshow("gradY", gradientY * 255); //imshow("weight", weight / 255); //run the algorithm: // for each possible gradient location // Note: these loops are reversed from the way the paper does them // it evaluates every possible center for each gradient location instead of // every possible gradient location for every center. Mat out = Mat::zeros(weight.rows,weight.cols, CV_32F); float max_val = 0; //for all pixels in the image for (int y = 0; y < EYE_FRAME_SIZE; ++y) { const int *grad_x = gradientX.ptr<int>(y), *grad_y = gradientY.ptr<int>(y); for (int x = 0; x < EYE_FRAME_SIZE; ++x) { int gX = grad_x[x], gY = grad_y[x]; if (gX == 0 && gY == 0) { continue; } //for all possible centers for (int cy = 0; cy < EYE_FRAME_SIZE; ++cy) { float *Or = out.ptr<float>(cy); const float *Wr = weight.ptr<float>(cy); for (int cx = 0; cx < EYE_FRAME_SIZE; ++cx) { //ignore center of box if (x == cx && y == cy) { continue; } //create a vector from the possible center to the gradient origin int dx = x - cx; int dy = y - cy; //compute dot product using lookup table float dotProduct; if(dx > 0 && dy > 0){ dotProduct = dpX[dx+EYE_FRAME_SIZE*dy]*gX + dpY[dx+EYE_FRAME_SIZE*dy]*gY; }else if(dx > 0){ dotProduct = dpX[dx-EYE_FRAME_SIZE*dy]*gX - dpY[dx-EYE_FRAME_SIZE*dy]*gY; }else if(dy > 0){ dotProduct = -dpX[-dx+EYE_FRAME_SIZE*dy]*gX - dpY[-dx+EYE_FRAME_SIZE*dy]*gY; }else{ dotProduct = -dpX[-dx-EYE_FRAME_SIZE*dy]*gX - dpY[-dx-EYE_FRAME_SIZE*dy]*gY; } //ignore negative dot products as they point away from eye if(dotProduct <= 0.0){ continue; } //square and multiply by the weight Or[cx] += dotProduct * dotProduct * Wr[cx]; //compare with max if(Or[cx] > max_val){ max_val = Or[cx]; } } } } } //resize for debugging resize(out, out, Size(500,500), 0, 0, INTER_NEAREST); out = 255 * out / max_val; //imshow("calc", out / 255); //histogram setup Mat hist; int histSize = 256; float range[] = { 0, 256 } ; const float* histRange = { range }; //calculate the histogram calcHist(&out,1, 0, Mat(), hist, 1, &histSize, &histRange, true, //uniform true //accumulate ); //get cutoff for top 10 pixels float top_end_sum = 0; int top_end = 0.92 * 255; for (int i = 255; i > 0; i--) { top_end_sum += hist.at<float>(i); if(top_end_sum > 3000){ top_end = i; break; } } //draw image for debugging Mat histImage(400, 512, CV_8UC3, Scalar(0,0,0)); int bin_w = cvRound( (double) 512/histSize ); normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat()); /// Draw for 
each channel for( int i = 1; i < histSize; i++) { line(histImage, Point(bin_w*(i), 400 - cvRound(hist.at<float>(i))), Point(bin_w*(i), 400), Scalar(i, i, i), 2, 8, 0); } //imshow("hist", histImage); //threshold to get just the pupil //printf("top_end: %d\n", top_end); threshold(out, out, top_end, 255, THRESH_TOZERO); //calc center of mass float sum = 0; float sum_x = 0; float sum_y = 0; for (int y = 0; y < out.rows; ++y) { float* row = out.ptr<float>(y); for (int x = 0; x < out.cols; ++x) { float val = row[x]*row[x]; if(val > 0){ sum += val; sum_x += val*x; sum_y += val*y; } } } Size eye_box_size = eye_box.size(); Size out_size = out.size(); //cout << "Size1: "+to_string(eye_box_size.width)+","+to_string(eye_box_size.height)+"\n"; //cout << "Size2: "+to_string(out_size.width)+","+to_string(out_size.height)+"\n"; float x_scale = (float) eye_box_size.width / out_size.width; float y_scale = (float) eye_box_size.height / out_size.height; CvPoint2D32f max = cvPoint2D32f(x_scale*sum_x/sum, y_scale*sum_y/sum); //circle(out, max, 3, 0); //imshow("thresh", out / 255); return max; }
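// Usage sketch (illustrative): getPupilCenter() expects a grayscale eye crop and relies on
// helpers defined elsewhere in this project (computeGradient, matrixMagnitude, the dpX/dpY
// lookup tables and EYE_FRAME_SIZE). A call site might look roughly like this:
void locate_pupil(Mat &frame, const Rect &eye_rect)
{
    Mat eye_box;
    cvtColor(frame(eye_rect), eye_box, CV_BGR2GRAY);
    CvPoint2D32f center = getPupilCenter(eye_box);
    // the returned point is in eye_box coordinates; shift it back into full-frame coordinates
    Point pupil(eye_rect.x + (int)center.x, eye_rect.y + (int)center.y);
    circle(frame, pupil, 3, Scalar(0, 255, 0), -1);
}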
int main( int argc, char** argv ) { /// Load an image cv::Mat src, greyIm, histeqIm; src = cv::imread( argv[1] ); if( !src.data ) { printf("Input file? No? ouuuupsss thooooorryyyyy\n"); return -1; } cv::Size s = src.size(); int rows = s.height; int cols = s.width; // Setup a rectangle to define your region of interest cv::Rect myROI(0, rows/2, cols, rows/2); // Crop the full image to that image contained by the rectangle myROI // Note that this doesn't copy the data cv::Mat croppedImage = src(myROI); cv::imwrite("output/1_low_half.jpg", croppedImage); cv::cvtColor(croppedImage, greyIm, cv::COLOR_BGR2GRAY); cv::Size crop_size = croppedImage.size(); int crop_rows = crop_size.height; int crop_cols = crop_size.width; cv::imwrite("output/2_grey_scale.jpg", greyIm); cv::equalizeHist( greyIm, histeqIm ); cv::imwrite("output/3_hist_eq.jpg", histeqIm); std::vector<std::vector<cv::Point> > contours; std::vector<cv::Vec4i> hierarchy; // Reduce noise with kernel 3x3 cv::Mat blurIm; blur(histeqIm, blurIm, cv::Size(3,3)); cv::imwrite("output/4_blur.jpg", blurIm); // Canny detector cv::Mat edgesIm; Canny(blurIm, edgesIm, thresh, thresh*ratio, kernel_size); cv::imwrite("output/5_edge.jpg", edgesIm); // Find contours cv::findContours(edgesIm, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0,0)); // Approximate contours to polygons + get bounding rects and circles std::vector<std::vector<cv::Point> > contours_poly(contours.size()); std::vector<cv::Rect> boundRect(contours.size()); std::vector<cv::Point2f>center(contours.size()); std::vector<float>radius(contours.size()); for (int i = 0; i < contours.size(); i++) { cv::approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 3, true); boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i])); cv::minEnclosingCircle((cv::Mat)contours_poly[i], center[i], radius[i]); } // Draw contours int j=0; cv::Mat drawing = cv::Mat::zeros(edgesIm.size(), CV_8UC3); cv::Mat piece[5], hsvIm[5]; for (int i = 0; i < contours.size(); i++) { if (!((boundRect[i].height >= boundRect[i].width/5) && (boundRect[i].height <= boundRect[i].width/2) && boundRect[i].height<=crop_rows/4 && boundRect[i].width<=crop_cols/2 && boundRect[i].height>=crop_rows/10 && boundRect[i].width>=crop_cols/6)) continue; cv::Rect roi = boundRect[i]; piece[j] = croppedImage(roi); imwrite("output/contour"+std::to_string(j)+".jpg", piece[j]); j++; cv::Scalar color = cv::Scalar(rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255)); cv::drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, cv::Point()); cv::rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0); //circle(drawing, center[i], (int)radius[i], color, 2, 8, 0); } imwrite("output/6_contours.jpg", drawing); int h_bins = 50; int s_bins = 60; int histSize[] = { h_bins, s_bins }; float h_ranges[] = { 0, 180 }; float s_ranges[] = { 0, 256 }; const float* ranges[] = { h_ranges, s_ranges }; int channels[] = { 0, 1 }; cv::Mat hist[5]; for (int i=0; i<j; i++){ cvtColor(piece[i], hsvIm[i], cv::COLOR_BGR2HSV); imwrite("output/hsvIm"+std::to_string(i)+".jpg", hsvIm[i]); calcHist( &hsvIm[i], 1, channels, cv::Mat(), hist[i], 2, histSize, ranges, true, false ); //normalize( hsvIm[i], hsvIm[i], 0, 1, cv::NORM_MINMAX, -1, cv::Mat() ); } return 0; }
void MainWindow::on_actionOtsu_local_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } DialogOtsuLocal dialog; dialog.setSpinBoxes(pixmapItem->pixmap().width(), pixmapItem->pixmap().height()); if (dialog.exec() == QDialog::Rejected) return; std::vector<int> grays(width * height, 0); int countX = dialog.gridX(); int countY = dialog.gridY(); double shiftY = ( (double) height ) / countY; double shiftX = ( (double) width ) / countX; double curX = 0.0; double curY = 0.0; double nextX = 0.0; double nextY = 0.0; int cX; int cY; int nX; int nY; QRgb black = qRgb(0,0,0); QRgb white = qRgb(255,255,255); QRgb newColor; for (int i = 0; i < countY; ++i) { nextY += shiftY; cY = qFloor(curY); nY = qFloor(nextY); curX = 0.0; nextX = 0.0; for (int j = 0; j < countX; ++j) { nextX += shiftX; cX = qFloor(curX); nX = qFloor(nextX); int threshold = otsu(image, grays, cX, cY, nX, nY); for (int y = cY; y < nY; ++y) for (int x = cX; x < nX; ++x) { int gray = grays[x + y * (nX - cX)]; if ( gray < threshold ) newColor = black; else newColor = white; image.setPixel(x, y, newColor); } curX = nextX; } curY = nextY; } pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
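// Sketch only: the otsu() helper used above is not shown here. Independent of its exact
// signature, Otsu's method picks the threshold that maximizes the between-class variance of a
// 256-bin gray-level histogram; a minimal standalone version over such a histogram follows.
int otsuFromHistogram(const std::vector<int> &hist, int total)
{
    double sumAll = 0.0;
    for (int i = 0; i < 256; ++i)
        sumAll += (double)i * hist[i];
    double sumBack = 0.0, bestSigma = -1.0;
    int wBack = 0, best = 0;
    for (int t = 0; t < 256; ++t)
    {
        wBack += hist[t];                  // background pixel count
        if (wBack == 0) continue;
        int wFore = total - wBack;         // foreground pixel count
        if (wFore == 0) break;
        sumBack += (double)t * hist[t];
        double meanBack = sumBack / wBack;
        double meanFore = (sumAll - sumBack) / wFore;
        double sigma = (double)wBack * wFore * (meanBack - meanFore) * (meanBack - meanFore);
        if (sigma > bestSigma) { bestSigma = sigma; best = t; }
    }
    return best;
}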
void MainWindow::on_actionBrightness_gradient_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } std::vector< std::vector<int> > grays( width, std::vector<int>(height) ); for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int gray = qPow( 0.2126 * qPow(qRed(oldColor), 2.2) + 0.7152 * qPow(qGreen(oldColor), 2.2) + 0.0722 * qPow(qBlue(oldColor), 2.2), 1/2.2 ); grays[x][y] = gray; } int G_x; int G_y; int G; unsigned long int dividend = 0; unsigned int divisor = 0; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { if (x == 0) G_x = grays[x+1][y]; else if (x == width - 1) G_x = grays[x-1][y]; else G_x = grays[x+1][y] - grays[x-1][y]; if (y == 0) G_y = grays[x][y+1]; else if (y == height - 1) G_y = grays[x][y-1]; else G_y = grays[x][y+1] - grays[x][y-1]; G = qMax( qAbs(G_x), qAbs(G_y) ); dividend += grays[x][y] * G; divisor += G; } int threshold = dividend / divisor; if (0 <= threshold && threshold <= 255) { QRgb black = qRgb(0,0,0); QRgb white = qRgb(255,255,255); QRgb newColor; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { int gray = grays[x][y]; if ( gray < threshold ) newColor = black; else newColor = white; image.setPixel(x, y, newColor); } } else ui->statusBar->showMessage(tr("Error. Invalid threshold"), 3000); pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
void MainWindow::on_actionZoom_triggered() { QPixmap oldPixmap = pixmapItem->pixmap(); QImage oldImage = oldPixmap.toImage(); int oldWidth = oldImage.width(); int oldHeight = oldImage.height(); if (oldWidth == 0 || oldHeight == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } DialogZoom dialog; if (dialog.exec() == QDialog::Rejected) return; double value = dialog.getValue(); int choice = dialog.getChoice(); int width = oldWidth * value; int height = oldHeight * value; QImage image(width, height, QImage::Format_ARGB32); image.fill( QColor(255, 255, 255) ); if (choice == 1) { for (int i = 0; i < width; ++i) for (int j = 0; j < height; ++j) { int srcX = i / value; int srcY = j / value; image.setPixel(i, j, oldImage.pixel(srcX, srcY)); } } else if (choice == 2) { int h, w; double t; double u; double tmp; double d1, d2, d3, d4; QRgb p1, p2, p3, p4; int red, green, blue; for (int j = 0; j < height; ++j) { tmp = j / (double) (height - 1) * (oldHeight - 1); h = qFloor(tmp); h = h < 0? 0: (h >= oldHeight - 1? oldHeight - 2: h); u = tmp - h; for (int i = 0; i < width; ++i) { tmp = i / (double) (width - 1) * (oldWidth - 1); w = qFloor(tmp); w = w < 0? 0: (w >= oldWidth - 1? oldWidth - 2: w); t = tmp - w; d1 = (1 - t) * (1 - u); d2 = t * (1 - u); d3 = t * u; d4 = (1 - t) * u; p1 = oldImage.pixel(w, h); p2 = oldImage.pixel(w + 1, h); p3 = oldImage.pixel(w + 1, h + 1); p4 = oldImage.pixel(w, h + 1); red = (int)(qRed(p1) * d1) + (int)(qRed(p2) * d2) + (int)(qRed(p3) * d3) + (int)(qRed(p4) * d4); blue = (int)(qBlue(p1) * d1) + (int)(qBlue(p2) * d2) + (int)(qBlue(p3) * d3) + (int)(qBlue(p4) * d4); green = (int)(qGreen(p1) * d1) + (int)(qGreen(p2) * d2) + (int)(qGreen(p3) * d3) + (int)(qGreen(p4) * d4); image.setPixel(i, j, qRgb(red, green, blue)); } } } else if (choice == 3) { int scale = qCeil(value); for (int j = scale; j < height - 2 * value; ++j) { // int h = qFloor(j / value); // h = h < 0? 0: (h >= oldHeight - 1? oldHeight - 2: h); for (int i = scale; i < width - 2 * value; ++i) { // int w = qFloor(i / value); // w = w < 0? 0: (w >= oldWidth - 1? 
oldWidth - 2: w); int srcX = qFloor(i / value); int srcY = qFloor(j / value); double relativeX = (i / value) - qFloor((i / value)); double relativeY = (j / value) - qFloor((j / value)); QRgb p00 = oldImage.pixel(srcX - 1, srcY - 1); QRgb p10 = oldImage.pixel(srcX, srcY - 1); QRgb p20 = oldImage.pixel(srcX + 1, srcY - 1); QRgb p30 = oldImage.pixel(srcX + 2, srcY - 1); QRgb p01 = oldImage.pixel(srcX - 1, srcY); QRgb p11 = oldImage.pixel(srcX, srcY); QRgb p21 = oldImage.pixel(srcX + 1, srcY); QRgb p31 = oldImage.pixel(srcX + 2, srcY); QRgb p02 = oldImage.pixel(srcX - 1, srcY + 1); QRgb p12 = oldImage.pixel(srcX, srcY + 1); QRgb p22 = oldImage.pixel(srcX + 1, srcY + 1); QRgb p32 = oldImage.pixel(srcX + 2, srcY + 1); QRgb p03 = oldImage.pixel(srcX - 1, srcY + 2); QRgb p13 = oldImage.pixel(srcX, srcY + 2); QRgb p23 = oldImage.pixel(srcX + 1, srcY + 2); QRgb p33 = oldImage.pixel(srcX + 2, srcY + 2); double r0 = CubicInterpolation( relativeX, qRed(p00), qRed(p10), qRed(p20), qRed(p30) ); double r1 = CubicInterpolation( relativeX, qRed(p01), qRed(p11), qRed(p21), qRed(p31) ); double r2 = CubicInterpolation( relativeX, qRed(p02), qRed(p12), qRed(p22), qRed(p32) ); double r3 = CubicInterpolation( relativeX, qRed(p03), qRed(p13), qRed(p23), qRed(p33) ); int r = qMax(0.0, qMin(255.0, CubicInterpolation(relativeY, r0, r1, r2, r3))); double g0 = CubicInterpolation( relativeX, qGreen(p00), qGreen(p10), qGreen(p20), qGreen(p30) ); double g1 = CubicInterpolation( relativeX, qGreen(p01), qGreen(p11), qGreen(p21), qGreen(p31) ); double g2 = CubicInterpolation( relativeX, qGreen(p02), qGreen(p12), qGreen(p22), qGreen(p32) ); double g3 = CubicInterpolation( relativeX, qGreen(p03), qGreen(p13), qGreen(p23), qGreen(p33) ); int g = qMax(0.0, qMin(255.0, CubicInterpolation(relativeY, g0, g1, g2, g3))); double b0 = CubicInterpolation( relativeX, qBlue(p00), qBlue(p10), qBlue(p20), qBlue(p30) ); double b1 = CubicInterpolation( relativeX, qBlue(p01), qBlue(p11), qBlue(p21), qBlue(p31) ); double b2 = CubicInterpolation( relativeX, qBlue(p02), qBlue(p12), qBlue(p22), qBlue(p32) ); double b3 = CubicInterpolation( relativeX, qBlue(p03), qBlue(p13), qBlue(p23), qBlue(p33) ); int b = qMax(0.0, qMin(255.0, CubicInterpolation(relativeY, b0, b1, b2, b3))); image.setPixel(i, j, qRgb(r, g, b)); } } } QPixmap pixmap; pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
void MainWindow::on_actionPiecewise_linear_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } DialogPiecewiseLinear dialog; dialog.setText(pieceWiseLinearText); if (dialog.exec() == QDialog::Rejected) return; std::vector<double> nums = dialog.getNums(); pieceWiseLinearText = dialog.getString(); bool normalize = dialog.isNormalize(); std::vector<double> csR(nums.size() / 4); std::vector<double> csG(nums.size() / 4); std::vector<double> csB(nums.size() / 4); if (normalize) { int maxR = -1; int maxG = -1; int maxB = -1; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int red = qRed(oldColor); int green = qGreen(oldColor); int blue = qBlue(oldColor); if (red > maxR) maxR = red; if (green > maxG) maxG = green; if (blue > maxB) maxB = blue; } for (unsigned int i = 0; i < nums.size(); i += 4) { csR[i/4] = 255 / (nums[i] * maxR + nums[i+1]); csG[i/4] = 255 / (nums[i] * maxG + nums[i+1]); csB[i/4] = 255 / (nums[i] * maxB + nums[i+1]); } } for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int red = qRed(oldColor); int green = qGreen(oldColor); int blue = qBlue(oldColor); double cR = 1.0; double cG = 1.0; double cB = 1.0; for (unsigned int i = 0; i < nums.size(); i += 4) { double k = nums[i]; double b = nums[i+1]; int left = qFloor(nums[i+2]); int right = qFloor(nums[i+3]); if (left <= red && red <= right) { if (normalize) cR = csR[i/4]; red = cR * (k * red + b); if (red < 0) red = 0; if (red > 255) red = 255; } if (left <= green && green <= right) { if (normalize) cG = csG[i/4]; green = cG * (k * green + b); if (green < 0) green = 0; if (green > 255) green = 255; } if (left <= blue && blue <= right) { if (normalize) cB = csB[i/4]; blue = cB * (k * blue + b); if (blue < 0) blue = 0; if (blue > 255) blue = 255; } } image.setPixel(x, y, qRgb(red, green, blue)); } pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
AppTemplate::AppTemplate(const Mat* frame_set, const Rect iniWin,int ID) :ID(ID)//bgr,hsv,lab { //get roi out of frame set Rect body_win=scaleWin(iniWin,1/TRACKING_TO_BODYSIZE_RATIO); Rect roi_win(body_win.x-body_win.width,body_win.y-body_win.width,3*body_win.width,2*body_win.width+body_win.height); body_win= body_win&Rect(0,0,frame_set[0].cols,frame_set[0].rows); roi_win=roi_win&Rect(0,0,frame_set[0].cols,frame_set[0].rows); Mat roi_set[]={Mat(frame_set[0],roi_win),Mat(frame_set[1],roi_win),Mat(frame_set[2],roi_win)}; Rect iniWin_roi=iniWin-Point(roi_win.x,roi_win.y); //scores for each channel list<ChannelScore> channel_score; Mat mask_roi(roi_set[0].rows,roi_set[0].cols,CV_8UC1,Scalar(0)); rectangle(mask_roi,iniWin_roi,Scalar(255),-1); Mat inv_mask_roi(roi_set[0].rows,roi_set[0].cols,CV_8UC1,Scalar(255)); rectangle(inv_mask_roi,body_win-Point(roi_win.x,roi_win.y),Scalar(0),-1); //calculate score for each channel Mat temp_hist; Mat temp_bp; int hist_size[]={BIN_NUMBER}; for (int i=0;i<9;i++) { float range1[]={0,255}; if (i==3) { range1[1]=179; } const float* hist_range[]={range1}; calcHist(roi_set,3,&i,inv_mask_roi,temp_hist,1,hist_size,hist_range); normalize(temp_hist,temp_hist,255,0.0,NORM_L1);//scale to 255 for display calcBackProject(roi_set,3,&i,temp_hist,temp_bp,hist_range); int c[]={0}; int hs[]={BIN_NUMBER}; float hr[]={0,255}; const float* hrr[]={hr}; Mat hist_fore; Mat hist_back; calcHist(&temp_bp,1,c,mask_roi,hist_fore,1,hs,hrr); calcHist(&temp_bp,1,c,inv_mask_roi,hist_back,1,hs,hrr); normalize(hist_fore,hist_fore,1.0,0.0,NORM_L1); normalize(hist_back,hist_back,1.0,0.0,NORM_L1); //deal with gray image to get rid of #IND double score=getVR(hist_back,hist_fore); score=score==score ? score:0; channel_score.push_back(ChannelScore(i,score)); } //choose the 2 highest scored channels channel_score.sort(compareChannel); channels[0]=channel_score.back().idx; channel_score.pop_back(); channels[1]=channel_score.back().idx; //using 2 best channel to calculate histogram for (int i=0;i<2;++i) { _hRang[i][0]=0; if (channels[i]==3) _hRang[i][1]=179; else _hRang[i][1]=255; hRange[i]=_hRang[i]; } calcHist(roi_set,3,channels,inv_mask_roi,temp_hist,2,hSize,hRange); normalize(temp_hist,temp_hist,255,0,NORM_L1); Mat final_mask;//mask for sampling calcBackProject(roi_set,3,channels,temp_hist,final_mask,hRange); threshold(final_mask,final_mask,5,255,CV_THRESH_BINARY_INV); final_mask=min(final_mask,mask_roi); //choose the best two feature space for foreground**************** Mat hist_fore,hist_back; channel_score.clear(); double sum_score=0; for (int i=0;i<9;i++) { float range1[]={0,255}; if (i==3) { range1[1]=179; } const float* hist_range[]={range1}; Mat temp_hist_neg; calcHist(roi_set,3,&i,final_mask,temp_hist,1,hist_size,hist_range); normalize(temp_hist,temp_hist,255,0,NORM_L1); calcHist(roi_set,3,&i,inv_mask_roi,temp_hist_neg,1,hist_size,hist_range); normalize(temp_hist_neg,temp_hist_neg,255,0,NORM_L1); log(temp_hist,temp_hist); log(temp_hist_neg,temp_hist_neg); temp_hist=temp_hist-temp_hist_neg; threshold(temp_hist,temp_hist,0,255,CV_THRESH_TOZERO); normalize(temp_hist,temp_hist,255,0.0,NORM_L1);//scale to 255 for display calcBackProject(roi_set,3,&i,temp_hist,temp_bp,hist_range); int c[]={0}; int hs[]={BIN_NUMBER}; float hr[]={0,255}; const float* hrr[]={hr}; calcHist(&temp_bp,1,c,final_mask,hist_fore,1,hs,hrr); calcHist(&temp_bp,1,c,inv_mask_roi,hist_back,1,hs,hrr); normalize(hist_fore,hist_fore,1.0,0.0,NORM_L1); normalize(hist_back,hist_back,1.0,0.0,NORM_L1); double 
score=getVR(hist_back,hist_fore); score=score==score ? score:0; channel_score.push_back(ChannelScore(i,score)); sum_score+=exp(score); } channel_score.sort(compareChannel); channels[0]=channel_score.back().idx; channel_score.pop_back(); channels[1]=channel_score.back().idx; for (int i=0;i<2;++i) { _hRang[i][0]=0; if (channels[i]==3) _hRang[i][1]=179; else _hRang[i][1]=255; hRange[i]=_hRang[i]; } calcHist(roi_set,3,channels,final_mask,hist,2,hSize,hRange);/////////////////// normalize(hist,hist,255,0,NORM_L1); //recover the shift_vector Mat backPro; calcBackProject(roi_set,3,channels,hist,backPro,hRange); iniWin_roi=iniWin-Point(roi_win.x,roi_win.y); Point2f origin_point_roi((float)(iniWin_roi.x+0.5*iniWin_roi.width),(float)(iniWin_roi.y+0.5*iniWin_roi.height)); meanShift(backPro,iniWin_roi,TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 )); Point2f shift_point_roi((float)(iniWin_roi.x+0.5*iniWin_roi.width),(float)(iniWin_roi.y+0.5*iniWin_roi.height)); shift_vector=(shift_point_roi-origin_point_roi)*(1/(float)iniWin.width); }
int ThreshHolder::Yen(const Mat &image, bool ignoreBlack, bool ignoreWhite) { Mat workingImage = image.clone(); /// Establish the number of bins int histSize = 256; /// Set the ranges ( for B,G,R) ) float range[] = {0, 256}; const float *histRange = {range}; Mat data; calcHist(&workingImage, 1, 0, Mat(), data, 1, &histSize, &histRange); //Ignore full Black and White if (ignoreBlack) { data.at<float>(0) = 0.0f; } if (ignoreWhite) { data.at<float>(255) = 0.0f; } int threshold; int ih, it; float crit; float max_crit; float norm_histo[histSize]; /* normalized histogram */ float P1[histSize]; /* cumulative normalized histogram */ float P1_sq[histSize]; float P2_sq[histSize]; int total = 0; for (ih = 0; ih < histSize; ih++) { total += data.at<float>(ih); } for (ih = 0; ih < histSize; ih++) { norm_histo[ih] = data.at<float>(ih) / total; } P1[0] = norm_histo[0]; for (ih = 1; ih < histSize; ih++) { P1[ih] = P1[ih - 1] + norm_histo[ih]; } P1_sq[0] = norm_histo[0] * norm_histo[0]; for (ih = 1; ih < histSize; ih++) { P1_sq[ih] = P1_sq[ih - 1] + norm_histo[ih] * norm_histo[ih]; } P2_sq[histSize - 1] = 0.0; for (ih = histSize - 2; ih >= 0; ih--) { P2_sq[ih] = P2_sq[ih + 1] + norm_histo[ih + 1] * norm_histo[ih + 1]; } /* Find the threshold that maximizes the criterion */ threshold = -1; max_crit = INT_MIN; for (it = 0; it < histSize; it++) { crit = -1.0f * ((P1_sq[it] * P2_sq[it]) > 0.0f ? log(P1_sq[it] * P2_sq[it]) : 0.0f) + 2 * ((P1[it] * (1.0f - P1[it])) > 0.0f ? log(P1[it] * (1.0f - P1[it])) : 0.0f); if (crit > max_crit) { max_crit = crit; threshold = it; } } return threshold; }
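// Usage sketch (illustrative): applying the Yen threshold to a grayscale image. The file name
// is a placeholder and ThreshHolder is assumed to be default-constructible.
int threshold_with_yen()
{
    Mat gray = imread("input.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (gray.empty())
        return -1;
    ThreshHolder thresholder;
    int t = thresholder.Yen(gray, /*ignoreBlack=*/true, /*ignoreWhite=*/true);
    Mat binary;
    threshold(gray, binary, t, 255, THRESH_BINARY);
    imwrite("yen_binary.png", binary);
    return 0;
}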
/* * objective : get the gray level map of the input image and rescale it to the range [0-255] */ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit) { // adjust output matrix wrt the input size but single channel std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl; //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl; //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl; // rescale between 0-255, keeping floating point values cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX); // extract a 8bit image that will be used for histogram edge cut cv::Mat intGrayImage; if (inputMat.channels()==1) { outputMat.convertTo(intGrayImage, CV_8U); }else { cv::Mat rgbIntImg; outputMat.convertTo(rgbIntImg, CV_8UC3); cv::cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY); } // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation cv::Mat dst, hist; int histSize = 256; calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0); cv::Mat normalizedHist; normalize(hist, normalizedHist, 1, 0, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1 double min_val, max_val; minMaxLoc(normalizedHist, &min_val, &max_val); //std::cout<<"Hist max,min = "<<max_val<<", "<<min_val<<std::endl; // compute density probability cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F); denseProb.at<float>(0)=normalizedHist.at<float>(0); int histLowerLimit=0, histUpperLimit=0; for (int i=1;i<normalizedHist.size().height;++i) { denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i); //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl; if ( denseProb.at<float>(i)<histogramClippingLimit) histLowerLimit=i; if ( denseProb.at<float>(i)<1-histogramClippingLimit) histUpperLimit=i; } // deduce min and max admitted gray levels float minInputValue = (float)histLowerLimit/histSize*255; float maxInputValue = (float)histUpperLimit/histSize*255; std::cout<<"=> Histogram limits " <<"\n\t"<<histogramClippingLimit*100<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue <<"\n\t"<<(1-histogramClippingLimit)*100<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue <<std::endl; //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit); drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit); // rescale image range [minInputValue-maxInputValue] to [0-255] outputMat-=minInputValue; outputMat*=255.0/(maxInputValue-minInputValue); // cut original histogram and back project to original image cv::threshold( outputMat, outputMat, 255.0, 255.0, 2 ); //THRESH_TRUNC, clips values above 255 cv::threshold( outputMat, outputMat, 0.0, 0.0, 3 ); //THRESH_TOZERO, clips values under 0 }
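// Usage sketch (illustrative): clipping 5% of the histogram mass at each edge of an HDR-like
// input before further processing. The file name and clipping value are placeholders;
// drawPlot(), used inside rescaleGrayLevelMat(), is assumed to be available as above.
void rescale_example()
{
    cv::Mat input = cv::imread("hdr_input.png", -1);   // load the image unchanged
    if (input.empty())
        return;
    cv::Mat inputFloat, rescaled;
    input.convertTo(inputFloat, CV_32F);
    rescaleGrayLevelMat(inputFloat, rescaled, 0.05f);  // cut 5% at each histogram edge
    cv::Mat display;
    rescaled.convertTo(display, CV_8U);
    cv::imwrite("rescaled.png", display);
}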
void MainWindow::on_actionBrightness_quantization_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } std::vector<int> grays(width * height); for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int gray = qPow( 0.2126 * qPow(qRed(oldColor), 2.2) + 0.7152 * qPow(qGreen(oldColor), 2.2) + 0.0722 * qPow(qBlue(oldColor), 2.2), 1/2.2 ); grays[x + y * width] = gray; } std::vector<unsigned int> Rs(256, 0); std::vector<unsigned int> Gs(256, 0); std::vector<unsigned int> Bs(256, 0); std::vector<int> hist(256, 0); for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { int num = grays[x + y * width]; Rs[num] += qRed( image.pixel(x, y) ); Gs[num] += qGreen( image.pixel(x, y) ); Bs[num] += qBlue( image.pixel(x, y) ); ++hist[num]; } int quantsCountMaximum = 0; std::map<int, QRgb> colors; for (int i = 0; i < 256; ++i) { if (hist[i] == 0) continue; Rs[i] /= hist[i]; Gs[i] /= hist[i]; Bs[i] /= hist[i]; colors[i] = qRgb(Rs[i], Gs[i], Bs[i]); ++quantsCountMaximum; } DialogQuantization dialog; dialog.setQuantCountMaximum(quantsCountMaximum); if (dialog.exec() == QDialog::Rejected) return; int quantsCount = dialog.quantsCount(); double shift = 256 / quantsCount; double cur = 0.0; double next = 0.0; for (int i = 0; i < quantsCount; ++i) { next += shift; int c = qFloor(cur); int n = qFloor(next); int minC = 256; for (std::map<int, QRgb>::iterator it = colors.begin(); it != colors.end(); ++it) if (c <= (it->first) && (it->first) < n) if (it->first < minC) minC = it->first; QRgb newColor = colors[minC]; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { int num = grays[x + y * width]; if (c <= num && num < n) image.setPixel(x, y, newColor); else continue; } cur = next; } pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); //ui->graphicsView_2->fitInView(scene_2->itemsBoundingRect(), Qt::KeepAspectRatio); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
//----------------------------------------------------------------------------------------------------- void FrameAnalyzerHistogram::analyze(Mat &input, Mat &output) { lastFrame=currentFrame; currentFrame=input; frameList.append(input); std::cout << frameList.length() << std::endl; /// ---------------------------------------------------------------------- vector<Mat> bgr_planes; /// Separate the image into its 3 planes ( B, G and R ) split(input, bgr_planes); // Set the number of bins: int histSize = 256; // Set the value ranges ( for B,G,R ) float range[] = { 0, 256 }; const float* histRange = { range }; bool uniform = true; bool accumulate = false; Mat b_hist, g_hist, r_hist; // Compute the histograms: calcHist(&bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate); calcHist(&bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate); calcHist(&bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate); // Draw the histograms for B, G and R int hist_w = 640; int hist_h = 480; int bin_w = cvRound((double) hist_w / histSize); Mat histImage(hist_h, hist_w, CV_8UC3, Scalar(0, 0, 0)); // Normalize the result to [ 0, histImage.rows ] normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat()); normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat()); normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat()); // Draw each channel for (int i = 1; i < histSize; i++) { line(histImage, Point(bin_w * (i - 1), hist_h - cvRound(b_hist.at<float>(i - 1))), Point(bin_w * (i), hist_h - cvRound(b_hist.at<float>(i))), Scalar(255, 0, 0), 2, 8, 0); line(histImage, Point(bin_w * (i - 1), hist_h - cvRound(g_hist.at<float>(i - 1))), Point(bin_w * (i), hist_h - cvRound(g_hist.at<float>(i))), Scalar(0, 255, 0), 2, 8, 0); line(histImage, Point(bin_w * (i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))), Point(bin_w * (i), hist_h - cvRound(r_hist.at<float>(i))), Scalar(0, 0, 255), 2, 8, 0); } output = histImage; /* /// Display namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE); imshow("calcHist Demo", histImage); */ }
void MainWindow::on_actionGray_world_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } double avgRed = 0.0; double avgGreen = 0.0; double avgBlue = 0.0; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); avgRed += qRed(oldColor); avgGreen += qGreen(oldColor); avgBlue += qBlue(oldColor); } int length = width * height; avgRed /= length; avgGreen /= length; avgBlue /= length; double avg = (avgRed + avgGreen + avgBlue) / 3; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); double red = qRed(oldColor) * avg / avgRed; double green = qGreen(oldColor) * avg / avgGreen; double blue = qBlue(oldColor) * avg / avgBlue; if (red < 0) red = 0; if (red > 255) red = 255; if (green < 0) green = 0; if (green > 255) green = 255; if (blue < 0) blue = 0; if (blue > 255) blue = 255; image.setPixel(x, y, qRgb(red, green, blue)); } pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
void showHistogram(Mat im,string str) { im.convertTo(im, CV_8U); cout << "th1=" <<TH1<< endl; string savePath; /// Set the number of bins int histSize = 256-TH1; /// Set the value range ( for R,G,B ) float range[] = { TH1, 255 }; const float* histRange = { range }; bool uniform = true; bool accumulate = true; Mat r_hist; /// Compute the histogram: calcHist(&im, 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, true); savePath = "./output/histogram_" + str + ".xls"; outXls(savePath,r_hist,"float" ); Mat accumulate_hist = Mat::zeros(r_hist.rows, 1, CV_32F); // Create the histogram canvas int hist_w = 400; int hist_h = 400; int bin_w = cvRound((double)hist_w / histSize); Mat histImage(hist_w, hist_h, CV_8UC3, Scalar(0, 0, 0)); // bring the negative part of the histogram into the plot //accumulate histogram for (int m = 0; m < r_hist.rows;m++) { accumulate_hist.at<float>(m, 0) = sum(r_hist,m); } outXls("./output/accuxls.xls", accumulate_hist, "float"); /// Normalize the histogram to the range [ 0, histImage.rows ], i.e. [0,400] normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat()); //normalize(accumulate_hist, accumulate_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat()); /// Draw the histogram on the canvas for (int i = 1; i < histSize; i++) { /*Point up(bin_w*(i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))); Point bottom(bin_w*(i - 1), hist_h); line(histImage, bottom, up, Scalar(0, 0, 255), 2, 8, 0);*/ /*line(histImage, Point(bin_w*(i - 1), hist_h - cvRound(accumulate_hist.at<float>(i - 1))), Point(bin_w*(i), hist_h - cvRound(accumulate_hist.at<float>(i))), Scalar(0, 0, 255), 2, 8, 0);*/ line(histImage, Point(bin_w*(i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))), Point(bin_w*(i), hist_h - cvRound(r_hist.at<float>(i))), Scalar(0, 0, 255), 2, 8, 0); } /// Display the histogram imshow(str, histImage); }
void MainWindow::on_actionLinear_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } int minR = 256; int maxR = -1; int minG = 256; int maxG = -1; int minB = 256; int maxB = -1; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int red = qRed(oldColor); int green = qGreen(oldColor); int blue = qBlue(oldColor); if (red < minR) minR = red; if (red > maxR) maxR = red; if (green < minG) minG = green; if (green > maxG) maxG = green; if (blue < minB) minB = blue; if (blue > maxB) maxB = blue; } for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int red = ( qRed(oldColor) - minR) * 255 / (maxR - minR); int green = ( qGreen(oldColor) - minG) * 255 / (maxG - minG); int blue = ( qBlue(oldColor) - minB) * 255 / (maxB - minB); if (red < 0) red = 0; if (red > 255) red = 255; if (green < 0) green = 0; if (green > 255) green = 255; if (blue < 0) blue = 0; if (blue > 255) blue = 255; image.setPixel(x, y, qRgb(red, green, blue)); } pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
void HistogramOpenCV::liczHistogram(){ calcHist(&grayFrame,1,channel,Mat(),hist, 1,histSize,ranges); minMaxLoc(hist,&minValue,&maxValue,&indexMin,&indexMax); scale=(double)heightHistImage/maxValue; }
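// Sketch only: liczHistogram() relies on members set up elsewhere in the class. A declaration
// consistent with the calls above might look like this (names and values are assumptions; the
// real header may differ).
class HistogramOpenCV
{
public:
    void liczHistogram();
private:
    Mat grayFrame;                       // single-channel input frame
    Mat hist;                            // resulting 256-bin histogram
    int channel[1] = { 0 };              // compute the histogram over channel 0
    int histSize[1] = { 256 };
    float range[2] = { 0.0f, 256.0f };
    const float* ranges[1] = { range };
    double minValue = 0.0, maxValue = 0.0;
    Point indexMin, indexMax;            // locations of the histogram minimum and maximum
    int heightHistImage = 400;           // canvas height used to scale the bars
    double scale = 0.0;
};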
void MainWindow::on_actionGamma_correction_triggered() { QPixmap pixmap = pixmapItem->pixmap().copy(); QImage image = pixmap.toImage(); int width = image.width(); int height = image.height(); if (width == 0 || height == 0) { ui->statusBar->showMessage( tr("Error. Image bad size"), 3000 ); return; } DialogGammaCorrection dialog; if (dialog.exec() == QDialog::Rejected) return; int maxR = -1; int maxG = -1; int maxB = -1; for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int red = qRed(oldColor); int green = qGreen(oldColor); int blue = qBlue(oldColor); if (red > maxR) maxR = red; if (green > maxG) maxG = green; if (blue > maxB) maxB = blue; } double gamma = dialog.getGamma(); double cR = 255 / qPow(maxR, gamma); double cG = 255 / qPow(maxG, gamma); double cB = 255 / qPow(maxB, gamma); for (int y = 0; y < height; ++y) for (int x = 0; x < width; ++x) { QRgb oldColor = image.pixel(x, y); int red = cR * qPow(qRed(oldColor), gamma); int green = cG * qPow(qGreen(oldColor), gamma); int blue = cB * qPow(qBlue(oldColor), gamma); if (red < 0) red = 0; if (red > 255) red = 255; if (green < 0) green = 0; if (green > 255) green = 255; if (blue < 0) blue = 0; if (blue > 255) blue = 255; image.setPixel(x, y, qRgb(red, green, blue)); } pixmap.convertFromImage(image); pixmapItem_2->setPixmap(pixmap); scene_2->setSceneRect(QRectF(pixmap.rect())); calcHist(pixmap, hist_2, maxLevel_2); drawHist(pixmapItem_4, hist_2, maxLevel_2); }
// main function in class BrightnessGroup // analyze and classify images void BrightnessGroup::run(){ int i; // get the number of clusters from the user to set the number of groups cout << "How many clusters? "; cin >> numcluster; cout << endl; // make the folder to save the result makebrightnessfolder( numcluster ); // set the size of the brightness array to the number of images brightness = (float*)malloc(sizeof(float)*num); // for each image, for( i=0; i<num; i++){ Mat image, hsv_image; image = images[i]; // extract the brightness element of the image // if the image doesn't have 3 channels, it is classified as '-1' if (image.channels() == 3){ // convert the image from RGB to HSV cvtColor(image, hsv_image, CV_BGR2HSV); // Separate the image into its 3 planes ( H, S, V ) vector<Mat> hsv_planes; split( hsv_image, hsv_planes ); // make a histogram of the brightness element ('v' in HSV means 'value'), i.e. the third plane int vHistSize = 100; float vRange[] = {0, 100}; const float* vHistRange = { vRange }; Mat v_hist; calcHist( &hsv_planes[2], 1, 0, Mat(), v_hist, 1, &vHistSize, &vHistRange, true, false); int vHist_w = 300; int vHist_h = 400; int vbin_w = cvRound( (double) vHist_w/vHistSize); Mat vHistImage( vHist_h, vHist_w, CV_8UC3, Scalar( 0, 0, 0) ); normalize(v_hist, v_hist, 0, vHistImage.rows, NORM_MINMAX, -1, Mat() ); // with the brightness histogram, get the average brightness brightness[i] = avgBrightness(v_hist, vHistSize); cout << "The average brightness is " << brightness[i] << endl; }else{ brightness[i] = -1; } cout << "moving on to the next picture..." << endl; } // group the images by k-means clustering on their average brightness k_means(); // with the clusters array result, classify and save images in the appropriate folder if( flag == IS_FROM_FILES ){ int i=0; string filename; // open "filenames.txt" and get the name of each file ifstream file; file.open("filenames.txt"); // save the images in the appropriate folder according to the clusters array result while(!file.eof() && i<num ){ getline(file, filename); char newfile[FILE_NAME_MAX+20] = "./brightness_result/"; strcat(newfile, intToString(clusters[i]).c_str()); strcat(newfile, "/"); strcat(newfile, filename.c_str()); cout << "new file address : " << newfile << endl; imwrite(newfile, images[i]); i++; } }else if( flag == IS_FROM_URLS ){ int i; // build the file name to save; files are numbered from 0 in increasing order // and saved in the appropriate folder according to the clusters array result const char file[10] = "photo_"; for( i=0; i<num; i++){ char newfile[FILE_NAME_MAX+20] = "./brightness_result/"; strcat(newfile, intToString(clusters[i]).c_str()); strcat(newfile, "/"); strcat(newfile, file); if( i < 10 ){ strcat(newfile, "00"); strcat(newfile, intToString(i).c_str()); }else if ( i >= 10 && i < 100 ){ strcat(newfile, "0"); strcat(newfile, intToString(i).c_str()); }else if ( i >= 100 && i < 1000 ){ strcat(newfile, intToString(i).c_str()); } cout << newfile << endl; strcat(newfile, ".jpg"); imwrite(newfile, images[i]); } } cout << "brightness grouping finished" << endl; }
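// Sketch only: avgBrightness() is not shown in this file. One plausible implementation,
// assuming it returns the count-weighted mean bin index of the V histogram:
float avgBrightness(const Mat &v_hist, int vHistSize)
{
    float weightedSum = 0.0f, total = 0.0f;
    for (int i = 0; i < vHistSize; i++)
    {
        float count = v_hist.at<float>(i);
        weightedSum += count * i;
        total += count;
    }
    return total > 0.0f ? weightedSum / total : 0.0f;
}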
int Judgement::JudgementYON(Mat &image) { int success = 0; MatND dstHist; Mat histoImg = image.clone(); calcHist(&histoImg, 1, &channels, Mat(), dstHist, 1, &size, ranges); Mat dstImg(256, 256, CV_8U, Scalar(0));//draw the histogram double minValue = 0; double maxValue = 0; Point maxloc; minMaxLoc(dstHist, &minValue, &maxValue, NULL, &maxloc); //cout << " " << n << "." << m << " " << maxValue << endl; int hpt = saturate_cast<int>(0.9 * 256); vector<int> Boundnum; for (int j = 0; j < 256; j++) { float binValue = dstHist.at<float>(j); int realValue = saturate_cast<int>(binValue * hpt / maxValue); if (realValue != 0) { rectangle(dstImg, Point(j, 255), Point(j, 256 - realValue), Scalar(255)); Boundnum.push_back(j); } } int maxdata = *max_element(Boundnum.begin(), Boundnum.end()); int mindata = *min_element(Boundnum.begin(), Boundnum.end());//find the dynamic range of the histogram Rect recttemp; recttemp.x = maxloc.x; recttemp.y = maxloc.y - int((maxdata - mindata)*0.15); recttemp.width = 1; recttemp.height = int((maxdata - mindata)*0.3); rectangle(dstHist, recttemp, Scalar(0), -1); minMaxLoc(dstHist, &minValue, &maxValue, NULL, &maxloc); int anoThres = maxloc.y;//find the secondary peak Scalar avgnum; Mat StdDevImg; meanStdDev(histoImg, avgnum, StdDevImg); double Stdnum = StdDevImg.at<double>(Point(0, 0)); int ThreStep = maxdata - mindata; int StepNum = 30; int OrStep = mindata + int(ThreStep / 10); int Dstep = int(ThreStep / 30.0 + 0.5); if (Dstep == 0) { Dstep = 1; StepNum = ThreStep; } Mat TempImg; histoImg.copyTo(TempImg); vector<vector<Point>> contours; vector<Vec4i> hierarchy; Point pointSN, maxPoint = Point(0, 0); int Marknumone = 0; int Marknumtwo = 0; int Marknumthree = 0; for (int i = 0; i < StepNum; i++) { vector<Point> SN; OrStep = OrStep + Dstep; threshold(histoImg, TempImg, OrStep, 255, CV_THRESH_BINARY); /*Mat element = getStructuringElement(MORPH_RECT,Size(2,2)); erode(TempImg, TempImg, cv::Mat()); dilate(TempImg, TempImg, cv::Mat());*/ TempImg = ~TempImg; /*stringstream strstrone; strstrone << "water_stain_step_" << i << ".jpg"; imwrite(strstrone.str(), TempImg);*/ Mat BoundImg(TempImg.rows, TempImg.cols, CV_8UC1, Scalar(255)); Rect Wrect; Wrect.x = 1; Wrect.y = 1; Wrect.width = BoundImg.cols - 2; Wrect.height = BoundImg.rows - 2; rectangle(BoundImg, Wrect, Scalar(0), -1); Mat PlusImg(TempImg.rows + 2, TempImg.cols + 2, CV_8UC1, Scalar(255)); Mat PlusROI = PlusImg(Rect(1, 1, TempImg.cols, TempImg.rows)); TempImg.copyTo(PlusROI); Mat ContoursImg = PlusImg.clone(); findContours(ContoursImg, contours, hierarchy, RETR_TREE, CV_CHAIN_APPROX_SIMPLE); for (size_t j = 0; j < contours.size(); j++) { double area = cv::contourArea(contours[j]); pointSN.x = int(area); pointSN.y = j; SN.push_back(pointSN); } if (contours.size() != 0) { sort(SN.begin(), SN.end(), SortByM2); maxPoint = SN.back(); if (OrStep > anoThres - 5 && OrStep<anoThres + 20) Dstep = 1; else { Dstep = int(ThreStep / 30.0 + 0.5); } if (Dstep == 0) Dstep = 1; int k = maxPoint.y; Mat MarkImg(TempImg.rows, TempImg.cols, CV_8UC1, Scalar(0)); drawContours(MarkImg, contours, k, Scalar(255), -1); bitwise_and(BoundImg, MarkImg, MarkImg); int Mbound = 0;//check whether the contour reaches the image border Mbound = countNonZero(MarkImg); if (Mbound>0.5*(histoImg.cols)) break; if (contours[k].size() <= 4) continue; int son = hierarchy[k][2]; Point gravitycore = barycenter(contours[k]);//find the barycenter of the contour Rect maxcontours = boundingRect(contours[k]); int wValue = maxcontours.width / 12; gravitycore = gravitycore + Point(wValue - 1, wValue - 1); Mat gravityImg(TempImg.rows + 2 * wValue, TempImg.cols + 2 * wValue, CV_8UC1, Scalar(0)); Mat gravityImgROI = 
gravityImg(Rect(wValue, wValue, TempImg.cols, TempImg.rows)); TempImg.copyTo(gravityImgROI); Rect gravityrect = Rect(gravitycore - Point(1, 1), gravitycore + Point(2 * wValue, 2 * wValue) - Point(2, 2));//画出重心周围(2 * wValue)*(2 * wValue)的矩形区域 if (gravityrect.x < 0 || gravityrect.y < 0) continue; int avnum = countNonZero(gravityImg(Rect(gravityrect))); vector<Point> hull; convexHull(contours[k], hull, false); double promark = (contourArea(contours[k])) / (contourArea(hull)); if (son >= 0)//判断是否为父轮廓 { int sonarea = 0; for (size_t j = 0; j < contours.size(); j++) { if (hierarchy[j][3] == k&&contourArea(contours[j])>4.0) sonarea = sonarea + contourArea(contours[j]); } if (50 * sonarea>maxPoint.x)//此处忽略一些偶然出现的中空点 Marknumone++; } if (avnum < double(0.5 * gravityrect.width*gravityrect.width))//在重心区域中的白色点的数量是否过半 Marknumtwo++; if (promark < 0.6) Marknumthree++; } } if (Marknumone > 2 || Marknumtwo >= 2 || Marknumthree > 3)//缺陷点也可能偶然出现包含 { /*cout << "该点是水渍2" << endl;*/ } else { /*cout << "该点是缺陷2" << endl;*/ success++; } return success; }
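The first part of the routine above reads the histogram peak and the range of occupied bins before sweeping thresholds. A self-contained sketch of just that step (function and variable names are illustrative, not from the Judgement class):

#include <opencv2/opencv.hpp>

// Illustrative only: peak bin and occupied-bin range of a grayscale histogram.
void histPeakAndRange(const cv::Mat& gray, int& peakBin, int& minBin, int& maxBin)
{
    int histSize = 256;
    float range[] = {0, 256};
    const float* ranges = range;
    int channel = 0;
    cv::Mat hist;
    cv::calcHist(&gray, 1, &channel, cv::Mat(), hist, 1, &histSize, &ranges);

    double minVal, maxVal;
    cv::Point maxLoc;
    cv::minMaxLoc(hist, &minVal, &maxVal, 0, &maxLoc);
    peakBin = maxLoc.y;                       // a 1-D histogram is stored as a 256x1 matrix

    minBin = -1; maxBin = -1;
    for (int i = 0; i < histSize; ++i)
        if (hist.at<float>(i) > 0) { if (minBin < 0) minBin = i; maxBin = i; }
}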
void ModalityColor3DHistogram::update(Image& image, PatchSet* patchSet, Rect bounds)
{
    Ptr<PatchSet> patches = Ptr<PatchSet>(reliablePatchesFilter.empty() ? patchSet : patchSet->filter(*reliablePatchesFilter));

    Rect background_outer = expand(bounds, background_size + background_margin);
    Rect background_inner = expand(bounds, background_margin);

    MatND new_foreground, new_background;

    //tmp_float.convertTo(image.get(colorspace), CV_32F, 1.0/255.0);
    Mat arrays[] = {image.get(colorspace)};

    Mat mask = image.get_mask();
    mask.setTo(0);

    int half_size = patches->get_radius() * foreground_size;

    // Foreground histogram: mark a square region around every (reliable) patch
    for (int i = 0; i < patches->size(); i++) {
        Point2f pos = patches->get_position(i);
        Rect r;
        r.x = CLAMP3( ((int)pos.x - half_size), 0, mask.cols);
        r.y = CLAMP3( ((int)pos.y - half_size), 0, mask.rows);
        r.width = CLAMP3( ((int)pos.x + half_size), 0, mask.cols) - r.x;
        r.height = CLAMP3( ((int)pos.y + half_size), 0, mask.rows) - r.y;
        if (r.width < 1 || r.height < 1) { continue; }
        mask(r) = 1;
    }

    calcHist(arrays, 1, channels, mask, new_foreground, 3, histSize, ranges, true, false);

    mask.setTo(0);

    /*Point2f* convex_points = new Point2f[patches.size()];
    for (int i = 0; i < patches.size(); i++) {
        convex_points[i] = patches.get_position(i);
    }
    fillconvex(mask, convex_points, convex_points.size(), Scalar(1));*/

    // Background histogram: the frame between the inner and outer rectangles around the bounds
    rectangle(mask, background_outer.tl(), background_outer.br(), Scalar(1), FILLED);
    rectangle(mask, background_inner.tl(), background_inner.br(), Scalar(0), FILLED);

    calcHist(arrays, 1, channels, mask, new_background, 3, histSize, ranges, true, false);
    new_background += 1;

    // Merging model with new data
    float* ofd = (float*) foreground.data;
    float* nfd = (float*) new_foreground.data;
    float* obd = (float*) background.data;
    float* nbd = (float*) new_background.data;
    float* md = (float*) model.data;

    float apriori = (float)(bounds.width * bounds.height) / (float)(image.width() * image.height()); // TODO: justify factor

    int histCount = histSize[0] * histSize[1] * histSize[2];

    float nfdSum = 0, nbdSum = 0;
    for (int i = 0; i < histCount; i++) {
        nfdSum += nfd[i];
        nbdSum += nbd[i];
    }

    float nfdSum2 = 0, nbdSum2 = 0;
    if (nfdSum > 0) {
        for (int i = 0; i < histCount; i++) {
            ofd[i] = foreground_presistence * ofd[i] + (1 - foreground_presistence) * (nfd[i] / nfdSum);
            nfdSum2 += ofd[i];
        }
    } else {
        for (int i = 0; i < histCount; i++) {
            nfdSum2 += ofd[i];
        }
    }

    if (nbdSum > 0) {
        for (int i = 0; i < histCount; i++) {
            obd[i] = background_presistence * obd[i] + (1 - background_presistence) * (nbd[i] / nbdSum);
            nbdSum2 += obd[i];
        }
    } else {
        for (int i = 0; i < histCount; i++) {
            nbdSum2 += obd[i];
        }
    }

    // Per-bin posterior probability of foreground, scaled to 0..255
    for (int i = 0; i < histCount; i++) {
        md[i] = ((apriori * (ofd[i] / nfdSum2)) / (apriori * (ofd[i] / nfdSum2) + (1 - apriori) * (obd[i] / nbdSum2))) * 255;
    }

    has_data = true;
}
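The core operation above is a masked 3-D colour histogram. A hedged sketch of that building block, with bin counts, ranges and the BGR colourspace chosen as assumptions rather than the tracker's actual configuration:

#include <opencv2/opencv.hpp>

// Sketch: 8x8x8 BGR histogram of the pixels selected by a binary mask, normalised to sum to 1.
cv::Mat colourHistogram3D(const cv::Mat& bgr, const cv::Mat& mask)
{
    int channels[] = {0, 1, 2};
    int histSize[] = {8, 8, 8};
    float range[] = {0, 256};
    const float* ranges[] = {range, range, range};
    cv::Mat hist;
    cv::calcHist(&bgr, 1, channels, mask, hist, 3, histSize, ranges, true, false);

    double total = cv::sum(hist)[0];
    if (total > 0)
        hist.convertTo(hist, CV_32F, 1.0 / total);   // make the bins sum to 1
    return hist;
}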
int main(int argc, char* argv[])
{
    const char* filename = argc == 2 ? argv[1] : "test.avi";
    VideoCapture capture(filename);
    if (!capture.isOpened())
        return -1;

    namedWindow("Video", CV_WINDOW_AUTOSIZE);
    //namedWindow("Hist", CV_WINDOW_AUTOSIZE);

    vector<int> vec;
    int number = 0;
    int width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
    int height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    int wait = 1000 / capture.get(CV_CAP_PROP_FPS);
    int temp = width * PERCENT_EDGE;
    long int amount_pixels = (width * height) - (2 * temp * height);

    // create a mask so the histogram is computed without the frame edges
    cv::Mat mask(height, width, CV_8U, 0.0);
    rectangle(mask, cv::Point(temp, 0), cv::Point(width - temp, height), 1, CV_FILLED);

    // histogram parameters
    int histSize = 256;
    float range[] = { 0, 256 };
    const float* histRange = { range };
    bool uniform = true;
    bool accumulate = false;
    int hist_w = 512;
    int hist_h = 400;
    int bin_w = cvRound((double)hist_w / histSize);

    while (capture.isOpened())
    {
        Mat src;
        if (!capture.read(src))
            break;
        imshow("Video", src);

        // compute the histogram for the R channel only
        std::vector<Mat> bgr_planes;
        split(src, bgr_planes);
        Mat r_hist;
        calcHist(&bgr_planes[2], 1, 0, mask, r_hist, 1, &histSize, &histRange, uniform, accumulate);

        Mat histImage(hist_h, hist_w, CV_8UC3, Scalar(0, 0, 0));
        HistEvidenceAnaliz(&r_hist, histSize, amount_pixels, &vec, number);

        // Normalize the result to [ 0, histImage.rows ]
        //normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
        // Draw Hist
        /*for (int i = 1; i < histSize; i++) {
            line(histImage, Point(bin_w*(i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))),
                 Point(bin_w*(i), hist_h - cvRound(r_hist.at<float>(i))), Scalar(0, 0, 255), 2, 8, 0);
        }*/
        //imshow("Hist", histImage);

        if (waitKey(wait) >= 0)
            break;
    }

    // locate the splice points while avoiding occasional stray frames
    vector<pair<int, int>> linking_frames;
    int counter = 0;
    int start = 0;
    for (vector<int>::iterator it = vec.begin(); it != vec.end(); it++)
    {
        if ((it + 1) != vec.end())
        {
            if (*(it + 1) - *it <= THRESHOLD_GAP_FRAME)
            {
                if (counter == 0)
                    start = *it;
                counter++;
            }
            else
            {
                if (counter < THRESHOLD_FRAME_SEQUENCE)
                    counter = 0;
                else
                {
                    if ((*it) - start > MIN_LENGTH_SEQUENCE_FRAME)
                        linking_frames.push_back(pair<int, int>(start, *it));
                    counter = 0;
                }
            }
        }
        else if (counter >= THRESHOLD_FRAME_SEQUENCE)
        {
            if ((*it) - start > MIN_LENGTH_SEQUENCE_FRAME)
                linking_frames.push_back(pair<int, int>(start, *it));
        }
    }

    cout << "Linking frames:" << endl;
    for (vector<pair<int, int>>::iterator it = linking_frames.begin(); it != linking_frames.end(); it++)
        cout << it->first << " - " << it->second << endl;

    getchar();
    return 0;
}
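The commented-out drawing loop above is the usual line rendering of a 1-D histogram. A small self-contained version of that rendering step (the function name drawHist1D is illustrative):

#include <opencv2/opencv.hpp>

// Illustrative: render a 1-D float histogram as a polyline on a black canvas.
cv::Mat drawHist1D(const cv::Mat& hist, int histSize = 256,
                   int hist_w = 512, int hist_h = 400)
{
    int bin_w = cvRound((double)hist_w / histSize);
    cv::Mat canvas(hist_h, hist_w, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat scaled;
    cv::normalize(hist, scaled, 0, canvas.rows, cv::NORM_MINMAX, -1, cv::Mat());
    for (int i = 1; i < histSize; ++i)
        cv::line(canvas,
                 cv::Point(bin_w * (i - 1), hist_h - cvRound(scaled.at<float>(i - 1))),
                 cv::Point(bin_w * i,       hist_h - cvRound(scaled.at<float>(i))),
                 cv::Scalar(0, 0, 255), 2, 8, 0);
    return canvas;
}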
/*
 * objective : get the gray level map of the input image and rescale it to the range [0-255]
 * if rescale0_255==true; otherwise the values are simply truncated
 */
static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit, const bool rescale0_255)
{
    // adjust the output matrix with respect to the input size, but single channel
    std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
    //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
    //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;

    // get min and max values to use afterwards if no 0-255 rescaling is used
    double maxInput, minInput, histNormRescalefactor=1.f;
    double histNormOffset=0.f;
    minMaxLoc(inputMat, &minInput, &maxInput);
    histNormRescalefactor=255.f/(maxInput-minInput);
    histNormOffset=minInput;
    std::cout<<"Hist max,min = "<<maxInput<<", "<<minInput<<" => scale, offset = "<<histNormRescalefactor<<", "<<histNormOffset<<std::endl;

    // rescale between 0-255, keeping floating point values
    cv::Mat normalisedImage;
    cv::normalize(inputMat, normalisedImage, 0.f, 255.f, cv::NORM_MINMAX);
    if (rescale0_255)
        normalisedImage.copyTo(outputMat);

    // extract an 8-bit image that will be used for the histogram edge cut
    cv::Mat intGrayImage;
    if (inputMat.channels()==1)
    {
        normalisedImage.convertTo(intGrayImage, CV_8U);
    }
    else
    {
        cv::Mat rgbIntImg;
        normalisedImage.convertTo(rgbIntImg, CV_8UC3);
        cv::cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
    }

    // get the histogram density probability in order to cut values under/above the edge limits (here 5-95%)...
    // useful for cancelling HDR pixel errors
    cv::Mat dst, hist;
    int histSize = 256;
    calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
    cv::Mat normalizedHist;
    normalize(hist, normalizedHist, 1.f, 0.f, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1

    // compute the density probability (cumulative distribution)
    cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
    denseProb.at<float>(0)=normalizedHist.at<float>(0);
    int histLowerLimit=0, histUpperLimit=0;
    for (int i=1; i<normalizedHist.size().height; ++i)
    {
        denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
        //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
        if ( denseProb.at<float>(i)<histogramClippingLimit)
            histLowerLimit=i;
        if ( denseProb.at<float>(i)<1.f-histogramClippingLimit)
            histUpperLimit=i;
    }
    // deduce min and max admitted gray levels
    float minInputValue = (float)histLowerLimit/histSize*255.f;
    float maxInputValue = (float)histUpperLimit/histSize*255.f;

    std::cout<<"=> Histogram limits "
             <<"\n\t"<<histogramClippingLimit*100.f<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
             <<"\n\t"<<(1.f-histogramClippingLimit)*100.f<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
             <<std::endl;
    //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
    drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);

    if(rescale0_255) // rescale between 0-255 if asked to
    {
        cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
        cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
        // rescale image range [minInputValue-maxInputValue] to [0-255]
        outputMat-=minInputValue;
        outputMat*=255.f/(maxInputValue-minInputValue);
    }
    else
    {
        inputMat.copyTo(outputMat);
        // update the thresholds in the initial input image range
        maxInputValue=(float)((maxInputValue-255.f)/histNormRescalefactor+maxInput);
        minInputValue=(float)(minInputValue/histNormRescalefactor+minInput);
        std::cout<<"===> Input Hist clipping values (max,min) = "<<maxInputValue<<", "<<minInputValue<<std::endl;
        cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
        cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
    }
}
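The clipping-limit search above amounts to reading the cumulative histogram at the requested percentiles. A compact sketch of that step under the same assumptions (8-bit gray input, limit given as a fraction; the helper name grayClipLimits is illustrative):

#include <opencv2/opencv.hpp>

// Sketch: return the gray levels at the [limit, 1-limit] points of the cumulative distribution.
void grayClipLimits(const cv::Mat& gray8u, float limit, int& lowLevel, int& highLevel)
{
    int histSize = 256;
    float range[] = {0, 256};
    const float* ranges = range;
    int channel = 0;
    cv::Mat hist;
    cv::calcHist(&gray8u, 1, &channel, cv::Mat(), hist, 1, &histSize, &ranges);
    cv::normalize(hist, hist, 1.0, 0.0, cv::NORM_L1);   // turn counts into a density

    float cumulative = 0.f;
    lowLevel = 0;
    highLevel = histSize - 1;
    for (int i = 0; i < histSize; ++i) {
        cumulative += hist.at<float>(i);
        if (cumulative < limit)        lowLevel  = i;
        if (cumulative < 1.f - limit)  highLevel = i;
    }
}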
void CImageAnalysis::OnBnClickedHough()
{
    CMainFrame * pwnd = (CMainFrame *)AfxGetMainWnd();
    GetDlgItem(IDC_Show);
    Mat mat = imread( "F:\\4点定位剪切图.bmp", CV_LOAD_IMAGE_ANYDEPTH|CV_LOAD_IMAGE_ANYCOLOR );
    Mat src,src1,src2;
    mat.copyTo(src);
    medianBlur(src,src1,5);   // median filtering
    int threshold1=100;

    Mat hist;
    int histSize = 255;
    float range[] = { 0, 255 } ;
    const float* histRange = { range };
    bool uniform = true;
    bool accumulate = false;
    calcHist( &src1, 1, 0, Mat(), hist, 1, &histSize, &histRange, uniform, accumulate );

    int hist_w = 400;
    int hist_h = 400;
    int bin_w = cvRound( (double) 3*hist_w/histSize );
    Mat histImage( hist_w, hist_h, CV_8UC3, Scalar( 255,255,255) );
    normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
    for( int i = 1; i < 255; i++ )
    {
        line( histImage, Point( bin_w*(i-1), hist_h - cvRound(hist.at<float>(i-1)) ) ,
              Point( bin_w*(i), hist_h - cvRound(hist.at<float>(i)) ), Scalar( 0, 0, 0), 2, 8, 0 );
        /*line( histImage, Point( bin_w*(i), hist_h - cvRound(hist.at<float>(i)) ) ,
              Point( bin_w*(i), hist_h ), Scalar( 0, 0, 0), 2, 8, 0 );*/
    }
    namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE );
    imshow("calcHist Demo", histImage );
    imwrite("hist.bmp",histImage);

    //Histogram();
    //m_Show.ShowImage(hist,0);
    //threshold(src1,src1,threshold1,255,THRESH_BINARY);   // generate the binary image
    //vector<vector<Point> > contours;
    //vector<Vec3f> circles;
    //findContours(src2,contours,CV_RETR_LIST,CV_CHAIN_APPROX_NONE);

    // Apply the Hough Transform to find the circles
    /*HoughCircles( src,circles, CV_HOUGH_GRADIENT, 1, 1, 200, 10, 2,1000);
    for( size_t i = 0; i < circles.size(); i++ )
    {
        Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        // circle center
        circle( src, center, 3, Scalar(0,255,0), -1, 8, 0 );
        // circle outline
        circle( src, center, radius, Scalar(0,0,255), 3, 8, 0 );
    }*/

    /*pwnd->m_imageprocess.ImageRotate(src1,src2,1.5);
    Mat src3=Mat::zeros(src2.rows,src2.cols,src2.type());   //src2.cols+10
    Mat dst=Mat::zeros(src2.rows,src2.cols,src2.type());
    int src2_half_rows=src2.rows/2;
    int src1_half_rows=src1.rows/2;
    int i,j,k;
    uchar *p,*q;
    MatIterator_<uchar> begin1,begin2;
    begin1=src1.begin<uchar>();
    begin2=src2.begin<uchar>();
    for(i=src2_half_rows-src1_half_rows;i!=src2_half_rows+src1_half_rows;++i)
    {
        p=src3.ptr<uchar>(i);
        for(j=0;j!=src1.cols;++j)
        {
            p[j]=*begin1;
            ++begin1;
        }
    }
    int dx=825,dy=5;   // dst only holds src1; values below (828,0) shift it toward the origin
    for (int i = 0; i < dst.rows; i++)
    {
        p = dst.ptr<uchar>(i);
        for (int j = 0; j < dst.cols; j++)
        {
            // map the translated coordinates back onto the original image
            int x = j - dx;
            int y = i - dy;
            // make sure the mapped coordinates stay inside the original image
            if (x >= 0 && y >= 0 && x < src3.cols && y < src3.rows)
                p[j] = src3.ptr<uchar>(y)[x];
        }
    }
    for(i=0;i!=src2.rows;++i)
    {
        q=dst.ptr<uchar>(i);
        for(j=0;j!=src2.cols;++j)   //src2.cols+10
        {
            k=q[j]/2+*begin2/2;
            q[j]=k;
            ++begin2;
        }
    }
    //m_Show.ShowImage(src2,0);
    //m_Show.ShowImage(dst,0);
    //imwrite("F:\\result1.bmp",dst);
    threshold(dst,dst,threshold1,255,THRESH_BINARY);   // generate the binary image
    vector<vector<Point> > contours;
    findContours(dst,contours,CV_RETR_LIST,CV_CHAIN_APPROX_NONE);
    //system("pause");
    /// draw the contours
    Mat gray ;
    cvtColor(dst,gray , CV_GRAY2BGR);
    for( int i = 0; i<contours.size(); i++ )
    {
        drawContours( gray, contours, i, Scalar(255,0,0), 2, 8, contours[0], 0, Point() );
    }
    m_Show.ShowImage(hist,0);*/
}
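For the circle-detection step that is commented out above, a minimal stand-alone call might look like the following; the parameter values are placeholders to tune, not the ones used in this handler:

#include <opencv2/opencv.hpp>

// Sketch: detect circles on a smoothed 8-bit grayscale image.
std::vector<cv::Vec3f> detectCircles(const cv::Mat& gray8u)
{
    cv::Mat blurred;
    cv::medianBlur(gray8u, blurred, 5);
    std::vector<cv::Vec3f> circles;
    // dp = 1, minDist = rows/8, Canny high threshold = 200, accumulator threshold = 100
    cv::HoughCircles(blurred, circles, CV_HOUGH_GRADIENT, 1,
                     blurred.rows / 8, 200, 100, 0, 0);
    return circles;
}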
bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size region_size)
{
    CV_INSTRUMENT_REGION();

    Mat img = _img.getMat(), cornersM = _corners.getMat();
    int ncorners = cornersM.checkVector(2, CV_32F);
    CV_Assert( ncorners >= 0 );
    Point2f* corners = cornersM.ptr<Point2f>();
    const int nbins = 256;
    float ranges[] = {0, 256};
    const float* _ranges = ranges;
    Mat hist;

    Mat black_comp, white_comp;
    for(int i = 0; i < ncorners; i++)
    {
        int channels = 0;
        Rect roi(cvRound(corners[i].x - region_size.width), cvRound(corners[i].y - region_size.height),
                 region_size.width*2 + 1, region_size.height*2 + 1);
        Mat img_roi = img(roi);
        calcHist(&img_roi, 1, &channels, Mat(), hist, 1, &nbins, &_ranges);

        int black_thresh = 0, white_thresh = 0;
        segment_hist_max(hist, black_thresh, white_thresh);

        threshold(img, black_comp, black_thresh, 255.0, THRESH_BINARY_INV);
        threshold(img, white_comp, white_thresh, 255.0, THRESH_BINARY);

        const int erode_count = 1;
        erode(black_comp, black_comp, Mat(), Point(-1, -1), erode_count);
        erode(white_comp, white_comp, Mat(), Point(-1, -1), erode_count);

        std::vector<std::vector<Point> > white_contours, black_contours;
        findContours(black_comp, black_contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
        findContours(white_comp, white_contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

        if(black_contours.size() < 5 || white_contours.size() < 5) continue;

        // find two white and black blobs that are close to the input point
        std::vector<std::pair<int, float> > white_order, black_order;
        orderContours(black_contours, corners[i], black_order);
        orderContours(white_contours, corners[i], white_order);

        const float max_dist = 10.0f;
        if(black_order[0].second > max_dist || black_order[1].second > max_dist ||
           white_order[0].second > max_dist || white_order[1].second > max_dist)
        {
            continue; // there will be no improvement in this corner position
        }

        const std::vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
                                              &white_contours[white_order[0].first], &white_contours[white_order[1].first]};
        std::vector<Point2f> quads_approx[4];
        Point2f quad_corners[4];
        for(int k = 0; k < 4; k++)
        {
            std::vector<Point2f> temp;
            for(size_t j = 0; j < quads[k]->size(); j++)
                temp.push_back((*quads[k])[j]);
            approxPolyDP(Mat(temp), quads_approx[k], 0.5, true);

            findCorner(quads_approx[k], corners[i], quad_corners[k]);
            quad_corners[k] += Point2f(0.5f, 0.5f);
        }

        // cross two lines
        Point2f origin1 = quad_corners[0];
        Point2f dir1 = quad_corners[1] - quad_corners[0];
        Point2f origin2 = quad_corners[2];
        Point2f dir2 = quad_corners[3] - quad_corners[2];
        double angle = acos(dir1.dot(dir2)/(norm(dir1)*norm(dir2)));
        if(cvIsNaN(angle) || cvIsInf(angle) || angle < 0.5 || angle > CV_PI - 0.5) continue;

        findLinesCrossPoint(origin1, dir1, origin2, dir2, corners[i]);
    }

    return true;
}
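The final step above intersects two lines given in point-plus-direction form; findLinesCrossPoint is internal to OpenCV, but the underlying computation is a small 2-D cross-product solve, sketched here (the name crossPoint is illustrative):

#include <opencv2/opencv.hpp>
#include <cmath>

// Illustrative: intersection of the lines o1 + t*d1 and o2 + s*d2 in 2-D.
static bool crossPoint(cv::Point2f o1, cv::Point2f d1,
                       cv::Point2f o2, cv::Point2f d2, cv::Point2f& out)
{
    float denom = d1.x * d2.y - d1.y * d2.x;         // cross(d1, d2)
    if (std::fabs(denom) < 1e-9f) return false;      // parallel lines, no single intersection
    cv::Point2f w = o2 - o1;
    float t = (w.x * d2.y - w.y * d2.x) / denom;     // t = cross(o2-o1, d2) / cross(d1, d2)
    out = o1 + t * d1;
    return true;
}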
void HashAllItems(const Mat& feature, const Mat& proj, HashParam& p)
{
    printf("start to build the bucket info ...\n");
    int nl = feature.rows;
    int nc = feature.cols;

    // hash every item
    // core: compute the value to hash; it is continuous, float, and not normalised
    Mat projectValue;
    ProjectAll(p, feature, proj, projectValue);

    // map the range (minRange, maxRange) to {0, 1, ..., bucketLength-1}
    // hash process
    p.hashPoint.create(nl, p.bucketNumber, CV_32SC1);
    float mid = p.range / 2;
    float pi = 3.14159265;

#ifdef HASH_FUNC_ORIGINAL
    p.r = (p.bucketLength - 1) / p.range;
#endif
#ifdef HASH_FUNC_SIGMOID
    float y1 = 1.0;
    float x1 = 0;
    int acc = 0, wc = 0;
    int bins = p.bucketLength;
    float seg = p.range / bins;
    int delta = 4;
    vector<int> hh = calcHist(projectValue.ptr<float>(0), nl, bins);
    vector<int> shh = smooth(hh, bins);
    for(int i = 0; i < bins; i ++)
    {
        //printf("%d", shh[i]);
        if(shh[i] < delta)
        {
            acc += shh[i];
            wc ++;
        }
        else break;
    }
    printf("wc: %d, acc: %d, seg: %f\n", wc, acc, seg);
    y1 = acc * bins * 1.0 / nl * 2;
    x1 = wc * seg - p.range / 2;
    p.a = p.bucketLength;
    p.b = 1.0;
    p.c = -log(p.a / y1 - p.b) / x1;
    printf("x1: %f, y1: %f, p.c: %f\n", x1, y1, p.c);
#endif
#ifdef HASH_FUNC_ATAN
    p.a = (p.bucketLength - 1) / pi;
    p.b = 2.0;
    p.c = (p.bucketLength - 1) / 2;
#endif
#ifdef HASH_FUNC_LOG
#endif

    FILE* out = fopen("/home/administrator/data.dat", "w");
    for(int bucket = 0; bucket < p.bucketNumber; bucket ++)
    {
        for(int pp = 0; pp < nl; pp ++)
        {
            float y = projectValue.at<float>(bucket, pp) - p.minRange;
            if(bucket == 0)
            {
                fprintf(out, "%f ", y);
            }
#ifdef HASH_FUNC_ORIGINAL
            y = y * p.r;
#endif
            // x**3
#ifdef HASH_FUNC_POWER_THREE
            printf("using hash function 2\n");
            y = 2 * y / p.range - 1;
            if(y < 0)
            {
                y = 0 - pow(0 - y, 1.0/3.0);
            }
            else
            {
                y = pow(y, 1.0/3.0);
            }
            y = (y + 1) * (p.bucketLength - 1) / 2.0;
#endif
            // sigmoid a/(1 + b * exp(-c*x))
#ifdef HASH_FUNC_SIGMOID
            y = y - mid;
            y = p.a / (1 + p.b * exp(-p.c * y));
#endif
            // atan a*atan(b*x)+c
#ifdef HASH_FUNC_ATAN
            y = y - mid;
            y = p.a * atan(p.b * y) + p.c;
#endif
            //
#ifdef HASH_FUNC_LOG
#endif
            int h = (int)(y);
            if(h < 0 || h >= p.bucketLength)
            {
                printf("y: %f, h: %d\n", y, h);
                programPause();
            }
            p.bucketInfo[bucket][h].push_back(pp);
            //p.hashPoint.at<int>(pp, bucket) = h;
        }
    } // end of hashing every item
    fclose(out);
    printf("\nexit projection ...\n");

    /*int showPixels = 1;
    Mat img = Mat::zeros(nl/levels * 5, showPixels*levels, CV_8UC3);
    Scalar color(0, 255, 255);
    int showlevels = p.bucketInfo[0].size();
    //int showlevels = levels;
    for(int i = 0; i < showlevels; i ++)
    {
        int v = p.bucketInfo[0][i].size();
        printf("%d ", v);
        //rectangle(img, Point(i*showPixels, 0), Point(i*showPixels, min(v, img.rows-1)), color, CV_FILLED);
    }
    printf("\n");
    namedWindow("bins");
    imshow("bins", img);
    waitKey(0);*/

    programPause();
}
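The sigmoid branch above squashes each projected value into a bucket index with a/(1 + b*exp(-c*x)). A hedged sketch of just that mapping, with clamping added for safety; the parameter names mirror the struct fields, but the values and the helper name are illustrative:

#include <cmath>
#include <algorithm>

// Sketch: map a centred projection value to a bucket index via a sigmoid.
// a is roughly the number of buckets, b and c control the slope.
int sigmoidBucket(float x, float mid, float a, float b, float c, int bucketLength)
{
    float y = a / (1.0f + b * std::exp(-c * (x - mid)));
    int h = (int)y;
    return std::min(std::max(h, 0), bucketLength - 1);   // clamp to a valid bucket index
}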