std::vector<cv::Rect> hogDetection(const cv::Mat& sequence, cv::HOGDescriptor& hog)
{
    std::vector<cv::Rect> found;
    // hitThreshold = 0, winStride = 8x8, padding = 32x32, scale = 1.05, finalThreshold = 2
    hog.detectMultiScale(sequence, found, 0, cv::Size(8,8), cv::Size(32,32), 1.05, 2);
    return found;
}
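All of these snippets assume a cv::HOGDescriptor that has already been loaded with a people-detection SVM. A minimal setup-and-call sketch (not part of the examples; the image path is a placeholder):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::HOGDescriptor hog;  // default 64x128 detection window
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    cv::Mat frame = cv::imread("frame.png");  // placeholder input image
    if (frame.empty())
        return 1;

    std::vector<cv::Rect> people = hogDetection(frame, hog);
    for (const cv::Rect& r : people)
        cv::rectangle(frame, r, cv::Scalar(0, 255, 0), 2);

    cv::imshow("people", frame);
    cv::waitKey(0);
    return 0;
}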
int process_image(cv::Mat& input_img)
{
    std::vector<cv::Rect> found_rectangles;
    hog.detectMultiScale(input_img, found_rectangles);

    // draw every detection onto the image
    for (unsigned int rec_idx = 0; rec_idx < found_rectangles.size(); ++rec_idx)
        cv::rectangle(input_img, found_rectangles[rec_idx], CV_RGB(255,0,0), 2);

    cv::imwrite("input_img.png", input_img);
    cv::imshow("input_img", input_img);  // needs a cv::waitKey() call somewhere to actually render
    return static_cast<int>(found_rectangles.size());
} // end process_image()
void peopleDetectApp::update()
{
    if( getElapsedFrames() % 30 == 0 ) {
        if( stillSurface ) {
            //cv::Mat newFrame = toOcv( Channel( capture.getSurface() ));
            // capture the frame
            cv::Mat newFrame = toOcv( Channel( stillSurface ) );

            vector<cv::Rect> found_filtered;

            // run the detector with default parameters. to get a higher hit-rate
            // (and more false alarms, respectively), decrease the hitThreshold and
            // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
            hog.detectMultiScale( newFrame, foundLocations, 0, cv::Size(8,8), cv::Size(32,32), 1.05, 2 );

            // drop any rectangle that is entirely contained inside another detection
            size_t i, j;
            for( i = 0; i < foundLocations.size(); i++ ) {
                cv::Rect r = foundLocations[i];
                for( j = 0; j < foundLocations.size(); j++ )
                    if( j != i && (r & foundLocations[j]) == r )
                        break;
                if( j == foundLocations.size() )
                    found_filtered.push_back( r );
            }
            console() << " found # " << foundLocations.size() << " filtered to " << found_filtered.size() << endl;

            ciPeople.clear();
            for( i = 0; i < found_filtered.size(); i++ ) {
                cv::Rect r = found_filtered[i];
                // the HOG detector returns slightly larger rectangles than the real objects,
                // so we slightly shrink the rectangles to get a nicer output.
                r.x      += cvRound( r.width * 0.1 );
                r.width   = cvRound( r.width * 0.8 );
                r.y      += cvRound( r.height * 0.07 );
                r.height  = cvRound( r.height * 0.8 );
                ciPeople.push_back( fromOcv( r ) );
            }
        }
    }
}
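The nested-rectangle filter above relies on cv::Rect's intersection operator: (r & q) == r holds exactly when r lies entirely inside q. A standalone sketch of that test (the rectangle values are arbitrary):

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Rect outer(10, 10, 100, 200);
    cv::Rect inner(20, 20, 50, 80);      // fully contained in outer
    cv::Rect partial(90, 50, 60, 60);    // only partially overlaps outer

    // r & q is the intersection; it equals r only when r is inside q
    std::cout << ((inner & outer) == inner) << std::endl;      // prints 1
    std::cout << ((partial & outer) == partial) << std::endl;  // prints 0
    return 0;
}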
void detect(shared_data& in_data)
{
    std::vector<cv::Rect> temp_boxes;
    for (auto i : in_data.im_data.im_ROI) {
        // upscale the ROI so that small people reach the detector's window size
        cv::resize(in_data.im_data.image(i), temp_resize, cv::Size(), opt.scale, opt.scale);

        // these magic numbers come from OpenCV's default 64x128 people-detection window
        if (temp_resize.rows > 128 && temp_resize.cols > 64) {
            temp_boxes.clear();
            hog.detectMultiScale(temp_resize, temp_boxes, 0, cv::Size(8, 8), cv::Size(0, 0), 1.05);

            // map the ROI origin into the scaled coordinate frame once, before the box loop
            i.x -= center_x;
            i.y -= center_y;
            i.x = opt.scale * i.x + scenter_x;
            i.y = opt.scale * i.y + scenter_y;

            for (auto j : temp_boxes) {
                // offset the detection by the ROI origin, then map it back to the original frame
                j.x += i.x;
                j.y += i.y;
                j.x -= scenter_x;
                j.y -= scenter_y;
                j.x = 1.0 / opt.scale * j.x + center_x;
                j.y = 1.0 / opt.scale * j.y + center_y;
                j.height /= opt.scale;
                j.width  /= opt.scale;
                box_safe(j, 0, 0, in_data.im_data.image.cols - 1, in_data.im_data.image.rows - 1);

                shared_data::bbox temp;
                temp = j;
                temp.type_label = TYPE_PERSON;
                in_data.im_boxes.push_back(temp);
            }
        }
    }
}
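box_safe is not shown in this snippet. A plausible implementation, offered only as an assumption about what it does, simply clamps a rectangle to the valid image area:

#include <opencv2/core.hpp>
#include <algorithm>

// Hypothetical helper: clamp a rect to [min_x, max_x] x [min_y, max_y].
// This is a guess at box_safe's behavior; the original implementation is not shown.
void box_safe(cv::Rect& r, int min_x, int min_y, int max_x, int max_y)
{
    int x1 = std::max(r.x, min_x);
    int y1 = std::max(r.y, min_y);
    int x2 = std::min(r.x + r.width,  max_x);
    int y2 = std::min(r.y + r.height, max_y);
    r = cv::Rect(x1, y1, std::max(x2 - x1, 0), std::max(y2 - y1, 0));
}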
void ocvOpticalFlowApp::findPeople(cv::Mat curImg)
{
    // run the detector on a grayscale copy of the frame
    cv::Mat monoImg;
    cv::cvtColor(curImg, monoImg, CV_BGR2GRAY);
    hog.detectMultiScale(monoImg, mFoundPeople);
}
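For context, a per-frame detection loop of this kind might look like the following sketch; the camera index and window name are placeholders, and the HOGDescriptor setup mirrors the earlier examples rather than the original app:

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::VideoCapture cap(0);  // placeholder: default camera
    if (!cap.isOpened())
        return 1;

    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    cv::Mat frame, gray;
    std::vector<cv::Rect> people;
    while (cap.read(frame)) {
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        hog.detectMultiScale(gray, people);
        for (const cv::Rect& r : people)
            cv::rectangle(frame, r, cv::Scalar(0, 0, 255), 2);
        cv::imshow("people", frame);
        if (cv::waitKey(1) == 27)  // ESC to quit
            break;
    }
    return 0;
}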