// Resize `image` to the detector window, convert to grayscale, and return the
// HOG descriptor values for the single window covering the whole image.
// Note: `image` is modified in place by the resize (existing caller contract).
vector<float> HOGDetectorGPU::getHOGdescriptors(cv::HOGDescriptor& hog, Mat& image){
    // Size(w, h): WIN_X is the window WIDTH, WIN_Y the window HEIGHT.
    resize(image, image, Size(WIN_X,WIN_Y) );
    Mat img;
    cvtColor(image, img, CV_RGB2GRAY);
    // BUG FIX: width and height were swapped (height=WIN_X, width=WIN_Y),
    // making hog.winSize disagree with the resized image and corrupting the
    // descriptor layout. cv::Size is (width, height), matching the resize above.
    hog.winSize.width = WIN_X;
    hog.winSize.height = WIN_Y;
    // Remaining HOGDescriptor parameters keep their defaults:
    // blockSize 16x16, blockStride 8x8, cellSize 8x8, nbins 9,
    // winSigma -1, L2Hys normalization with 0.2 threshold, no gamma correction.
    vector<float> descriptorsValues;
    vector<Point> locations;          // empty -> compute over the whole window
    hog.compute( img, descriptorsValues, Size(0,0), Size(0,0), locations);
    cout << "HOG descriptor size is " << hog.getDescriptorSize() << endl;
    // BUG FIX: missing space before "height" in the log message.
    cout << "img dimensions: " << img.cols << " width x " << img.rows << " height" << endl;
    cout << "Found " << descriptorsValues.size() << " descriptor values" << endl;
    cout << "Nr of locations specified : " << locations.size() << endl;
    return descriptorsValues;
}
Example #2
0
 // Construct the detector from the person-detection section of the options,
 // precomputing the frame centers in both original and scaled coordinates
 // (used later to map detections between the two spaces).
 people_detection_hog(const options& in_opt):
     opt(in_opt.person_d)
 {
     // Centers in original-image coordinates.
     center_x=in_opt.width/2.0;
     center_y=in_opt.height/2.0;
     // Centers in scaled-image coordinates.
     scenter_x=in_opt.width*opt.scale/2.0;
     scenter_y=in_opt.height*opt.scale/2.0;
     // Load the stock OpenCV people-detector SVM coefficients.
     hog.setSVMDetector(hog.getDefaultPeopleDetector());
 }
// Compute the HOG feature vector for `imageData` into `featureVector`, using
// the file-level winStride/trainingPadding settings. No explicit sample
// locations are given, so the descriptor covers the configured window.
static void calculateFeaturesFromInput( cv::Mat imageData, std::vector< float >& featureVector, cv::HOGDescriptor& hog )
{
	std::vector< cv::Point > samplePoints;  // empty on purpose
	hog.compute( imageData, featureVector, winStride, trainingPadding, samplePoints );

	// Drop this function's reference to the (by-value) Mat header early.
	imageData.release();
}
Example #4
0
// Entry point: runs the (global) HOG people detector either once on an image
// given as argv[1], and/or continuously on a hard-coded video file.
int main(int argc, char *argv[]) {
  hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
  //hog.setSVMDetector(cv::HOGDescriptor::getDaimlerPeopleDetector());

  cv::Mat input_img;
  if (argc == 2) {
    // Single-image mode.
    std::string file(argv[1]);
    input_img = cv::imread(file, CV_LOAD_IMAGE_COLOR);
    // BUG FIX: a failed imread produced an empty Mat that crashed downstream.
    if (input_img.empty()) {
      printf("Cannot read image '%s'!\n", file.c_str());
      return -1;
    }
    process_image(input_img);
    cv::waitKey(0);
  }

  //cv::VideoCapture cap(0);  // alternative: live webcam
  cv::VideoCapture cap;
  cap.open("/home/arnaud/Desktop/Baudet-Rob.mp4");
  if (!cap.isOpened()) {
        printf("Cannot open camera!"); return -1;
  }

  while(true) {
    cap >> input_img;
    // BUG FIX: at end of stream the grabbed frame is empty and cv::resize
    // would abort; stop the loop cleanly instead.
    if (input_img.empty())
      break;
    cv::resize(input_img, input_img, cv::Size(320, 240));
    process_image(input_img);
    cv::waitKey(15);
  }
  return 0;
}
Example #5
0
std::vector<cv::Rect> hogDetection(cv::Mat sequence, cv::HOGDescriptor hog)
{
    std::vector<cv::Rect> found;

    hog.detectMultiScale(sequence, found, 0, cv::Size(8,8), cv::Size(32,32), 1.05, 2);

    return found;
}
Example #6
0
// Detect people in input_img with the (global) HOG detector, draw a red box
// around each hit, then save and display the annotated frame.
// Returns the number of detections.
int process_image(cv::Mat & input_img) {
  std::vector<cv::Rect> found_rectangles;
  hog.detectMultiScale(input_img, found_rectangles);
  for (unsigned int rec_idx = 0; rec_idx < found_rectangles.size(); ++rec_idx)
    cv::rectangle(input_img, found_rectangles[rec_idx], CV_RGB(255,0,0), 2);
  cv::imwrite("input_img.png", input_img);
  cv::imshow("input_img", input_img);
  // BUG FIX: the function is declared int but had no return statement, which
  // is undefined behavior if a caller reads the value. Existing callers
  // ignore the result, so returning the count is backward compatible.
  return static_cast<int>(found_rectangles.size());
} // end process_image()
// One-time app initialization: configure the HOG detector, start the camera,
// and load the still test image into a GL texture.
void peopleDetectApp::setup()
{
    // Stock OpenCV people-detector coefficients.
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    // Begin capturing from the camera at 640x480.
    capture = Capture(640, 480);
    capture.start();

    // Still image used as an alternative detection source.
    stillSurface = loadImage(loadAsset("people_in_park.jpg"));
    stillTex = gl::Texture(stillSurface);
}
// One-time app initialization: enable point drawing, let the user pick a movie
// file to analyze, and configure the HOG people detector.
void ocvOpticalFlowApp::setup()
{
	mDrawPoints = true;

	// Camera capture is disabled in favor of a user-selected movie.
//	mCapture = Capture( 640, 480 );
//	mCapture.start();
	fs::path moviePath = getOpenFilePath();
	if( !moviePath.empty() )
		loadMovieFile( moviePath );

	// Stock OpenCV people-detector coefficients.
	hog.setSVMDetector( cv::HOGDescriptor::getDefaultPeopleDetector() );
}
Example #9
0
// Compute a single HOG descriptor for the whole image as a column cv::Mat.
// The window size is shrunk to the largest size compatible with the
// descriptor's block/stride geometry, and the image is resized to match so
// that compute() yields exactly one well-formed descriptor.
cv::Mat computeHOG(const cv::Mat &img_gray, cv::HOGDescriptor &d) {
  int correct_width = img_gray.cols - (img_gray.cols - d.blockSize.width) % d.blockStride.width;
  int correct_height = img_gray.rows - (img_gray.rows - d.blockSize.height) % d.blockStride.height;
  // BUG FIX: for images smaller than blockSize the subtraction above goes
  // negative and C++ '%' keeps the sign, which could produce a winSize
  // smaller than blockSize (invalid for HOGDescriptor). Clamp to one block.
  if (correct_width < d.blockSize.width) correct_width = d.blockSize.width;
  if (correct_height < d.blockSize.height) correct_height = d.blockSize.height;
  d.winSize = cv::Size(correct_width, correct_height);

  // Scale input image to the corrected window size.
  cv::Mat resize_img;
  cv::resize(img_gray, resize_img, d.winSize);

  std::vector<float> descriptorsValues;
  std::vector<cv::Point> locations;  // empty -> compute over the whole window
  d.compute(resize_img, descriptorsValues, cv::Size(0,0), cv::Size(0,0), locations);

  // Deep-copy the values into a Mat so the result owns its data.
  cv::Mat matDescriptorsValues(descriptorsValues, true);
  return matDescriptorsValues;
}
// Periodically (every 30th frame) run HOG people detection on the still image
// and publish shrunken detection rectangles into ciPeople.
void peopleDetectApp::update()
{
    if( getElapsedFrames() % 30 == 0 )
    {
        if(stillSurface) {
            //cv::Mat newFrame = toOcv( Channel( capture.getSurface() )); // capture the frame
            cv::Mat newFrame = toOcv( Channel(stillSurface));

            vector<cv::Rect> found_filtered;   // removed unused local `found`
            // run the detector with default parameters. to get a higher hit-rate
            // (and more false alarms, respectively), decrease the hitThreshold and
            // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
            hog.detectMultiScale(newFrame, foundLocations, 0, cv::Size(8,8), cv::Size(32,32), 1.05, 2);

            // Keep only rectangles that are not entirely contained in another.
            // BUG FIX: the `j == foundLocations.size()` test used to sit INSIDE
            // the inner loop, where it could never be true (j < size there), so
            // found_filtered was always empty.
            int i,j;
            for( i = 0; i < foundLocations.size(); i++ )
            {
                cv::Rect r = foundLocations[i];
                for( j = 0; j < foundLocations.size(); j++ )
                    if( j != i && (r & foundLocations[j]) == r)
                        break;
                if( j == foundLocations.size() )
                    found_filtered.push_back(r);
            }

            console() << " found # " << foundLocations.size() << " filtered to " << found_filtered.size() << endl;

            // NOTE(review): ciPeople is built from the UNFILTERED list, so the
            // filtering above only affects the log line. If the nested-rectangle
            // filter is meant to apply, iterate found_filtered here — confirm.
            ciPeople.clear();
            for( i = 0; i < foundLocations.size(); i++ ) {

                cv::Rect r = foundLocations[i];
                // the HOG detector returns slightly larger rectangles than the real objects.
                // so we slightly shrink the rectangles to get a nicer output.
                r.x += cvRound(r.width*0.1);
                r.width = cvRound(r.width*0.8);
                r.y += cvRound(r.height*0.07);
                r.height = cvRound(r.height*0.8);
                ciPeople.push_back(fromOcv(r));
            }
        }
    }
}
Example #11
0
 // Run HOG people detection on each (scaled) ROI of the input image and append
 // the detections, mapped back to original-image coordinates, to im_boxes.
 void detect(shared_data& in_data)
 {
     std::vector<cv::Rect> temp_boxes;
     for (auto i: in_data.im_data.im_ROI) {
         temp_resize.create(in_data.im_data.image(i).rows*opt.scale, in_data.im_data.image(i).cols*opt.scale, CV_64FC3);
         cv::resize(in_data.im_data.image(i), temp_resize, temp_resize.size());
         //these magic numbers are from opencv
         if (temp_resize.rows > 128 && temp_resize.cols > 64) {
             temp_boxes.clear();
             hog.detectMultiScale(temp_resize,
                                  temp_boxes,
                                  0,
                                  cv::Size(8, 8),
                                  cv::Size(0, 0),
                                  1.05);
             // Map the ROI origin into scaled coordinates ONCE per ROI.
             // BUG FIX: this transform used to run inside the per-box loop
             // below, re-applying it to `i` for every detected box and
             // compounding the offset whenever a ROI had more than one hit.
             i.x -= center_x;
             i.y -= center_y;
             i.x = opt.scale*i.x+scenter_x;
             i.y = opt.scale*i.y+scenter_y;
             for (auto j: temp_boxes) {
                 // Shift the box by the ROI origin, then map back from the
                 // scaled space to original-image coordinates.
                 j.x += i.x;
                 j.y += i.y;
                 j.x -= scenter_x;
                 j.y -= scenter_y;
                 j.x = 1.0/opt.scale*j.x+center_x;
                 j.y = 1.0/opt.scale*j.y+center_y;
                 j.height/=opt.scale;
                 j.width/=opt.scale;
                 // Clamp the box to the image bounds.
                 box_safe(j, 0, 0, in_data.im_data.image.cols-1, in_data.im_data.image.rows-1);
                 shared_data::bbox temp;
                 temp = j;
                 temp.type_label = TYPE_PERSON;
                 in_data.im_boxes.push_back(temp);
             }
         }
     }
 }
// Run the HOG people detector on a grayscale copy of curImg; results are
// stored in the mFoundPeople member for later use.
void ocvOpticalFlowApp::findPeople(cv::Mat curImg)
{
    cv::Mat grayImg;
    cv::cvtColor(curImg, grayImg, CV_BGR2GRAY);
    hog.detectMultiScale(grayImg, mFoundPeople);
}