/** * Test the trained detector against the same training set to get an approximate idea of the detector. * Warning: This does not allow any statement about detection quality, as the detector might be overfitting. * Detector quality must be determined using an independent test set. * @param hog */ static void detectTrainingSetTest(const HOGDescriptor& hog, const double hitThreshold, const vector<string>& posFileNames, const vector<string>& negFileNames) { unsigned int truePositives = 0; unsigned int trueNegatives = 0; unsigned int falsePositives = 0; unsigned int falseNegatives = 0; vector<Point> foundDetection; // Walk over positive training samples, generate images and detect for (vector<string>::const_iterator posTrainingIterator = posFileNames.begin(); posTrainingIterator != posFileNames.end(); ++posTrainingIterator) { const Mat imageData = imread(*posTrainingIterator, 0); hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding); if (foundDetection.size() > 0) { ++truePositives; falseNegatives += foundDetection.size() - 1; } else { ++falseNegatives; } } // Walk over negative training samples, generate images and detect for (vector<string>::const_iterator negTrainingIterator = negFileNames.begin(); negTrainingIterator != negFileNames.end(); ++negTrainingIterator) { const Mat imageData = imread(*negTrainingIterator, 0); hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding); if (foundDetection.size() > 0) { falsePositives += foundDetection.size(); } else { ++trueNegatives; } } printf("Results:\n\tTrue Positives: %u\n\tTrue Negatives: %u\n\tFalse Positives: %u\n\tFalse Negatives: %u\n", truePositives, trueNegatives, falsePositives, falseNegatives); }
// JNI bridge for the full-argument Java overload of HOGDescriptor.detect():
// detect(img, foundLocations, weights, hitThreshold, winStride, padding,
// searchLocations). Native object handles arrive as jlong pointers; results
// are marshalled back into the caller-provided native Mats.
JNIEXPORT void JNICALL Java_org_opencv_objdetect_HOGDescriptor_detect_10
  (JNIEnv* env, jclass , jlong self, jlong img_nativeObj, jlong foundLocations_mat_nativeObj, jlong weights_mat_nativeObj, jdouble hitThreshold, jdouble winStride_width, jdouble winStride_height, jdouble padding_width, jdouble padding_height, jlong searchLocations_mat_nativeObj)
{
    static const char method_name[] = "objdetect::detect_10()";
    try {
        LOGD("%s", method_name);
        // Output containers: filled by detect(), then copied into the Java-side Mats.
        vector<Point> foundLocations;
        Mat& foundLocations_mat = *((Mat*)foundLocations_mat_nativeObj);
        vector<double> weights;
        Mat& weights_mat = *((Mat*)weights_mat_nativeObj);
        // Input: candidate search locations arrive as a Mat and are converted
        // to the vector<Point> form the C++ API expects.
        vector<Point> searchLocations;
        Mat& searchLocations_mat = *((Mat*)searchLocations_mat_nativeObj);
        Mat_to_vector_Point( searchLocations_mat, searchLocations );
        HOGDescriptor* me = (HOGDescriptor*) self; //TODO: check for NULL
        Mat& img = *((Mat*)img_nativeObj);
        // Size parameters are passed from Java as doubles; truncate to int.
        Size winStride((int)winStride_width, (int)winStride_height);
        Size padding((int)padding_width, (int)padding_height);
        me->detect( img, foundLocations, weights, (double)hitThreshold, winStride, padding, searchLocations );
        // Marshal results back to the native Mats the Java wrapper reads from.
        vector_Point_to_Mat( foundLocations, foundLocations_mat );
        vector_double_to_Mat( weights, weights_mat );
        return;
    } catch(const std::exception &e) {
        // Re-throw into the JVM as a Java exception rather than crashing it.
        throwJavaException(env, &e, method_name);
    } catch (...) {
        throwJavaException(env, 0, method_name);
    }
    return;
}
/*compute sim based on the exsiting hog descriptor and detector apperance model*/ double particleFilter::computeSim(const HOGDescriptor& hog, Mat& frame, int x, int y, double hitThreshold){ vector<Point> searchLoc; vector<Point> foundLoc; vector<double> weights; searchLoc.push_back(Point((double)x, (double)y)); hog.detect(frame, foundLoc, weights, hitThreshold, Size(24,48), Size(1,1), searchLoc); if(weights.empty()) return 0; return MIN(1,(MAX(0,weights.at(0)-2))); }
void classify() { HOGDescriptor hog; hog.winSize = Size(sampleSize); SVMLight::SVMClassifier c("classifier.dat"); std::vector<float> descriptorVector = c.getDescriptorVector(); std::cout << descriptorVector.size() << std::endl; hog.setSVMDetector(descriptorVector); Mat m = imread("/Users/alberto/tmp/samples/fullframe9.png"); Mat m1 = m.clone(); std::vector<Rect> found; std::vector<Point> foundPoint; Size padding(Size(0, 0)); std::cout << "try to detect.." << std::endl; //hog.detectMultiScale(m, found, 0.0, winStride, padding, 1.01, 0.1); hog.detect(m, foundPoint, 0.0, winStride, padding); std::cout << "found: " << foundPoint.size() << std::endl; for(int i=0; i<foundPoint.size(); ++i) { Rect r; r.x = foundPoint[i].x; r.y = foundPoint[i].y; r.width = 48; r.height = 48; rectangle(m, r, Scalar(255,255,255)); Mat imageroi = m1(r); std::stringstream ss; ss << "/Users/alberto/tmp/samples/tmp/test"; ss << i; ss << ".png"; cv::imwrite(ss.str(), imageroi); } imshow("result", m); }
// JNI bridge for the minimal Java overload HOGDescriptor.detect(img,
// foundLocations, weights): every optional parameter takes its C++ default.
// Exceptions are converted to Java exceptions instead of unwinding into the JVM.
JNIEXPORT void JNICALL Java_org_opencv_objdetect_HOGDescriptor_detect_11
  (JNIEnv* env, jclass , jlong self, jlong img_nativeObj, jlong foundLocations_mat_nativeObj, jlong weights_mat_nativeObj)
{
    static const char method_name[] = "objdetect::detect_11()";
    try {
        LOGD("%s", method_name);
        // Unpack the native handles passed from Java as raw pointers.
        HOGDescriptor* hogPtr = (HOGDescriptor*) self; //TODO: check for NULL
        Mat& image = *((Mat*)img_nativeObj);
        Mat& locationsOut = *((Mat*)foundLocations_mat_nativeObj);
        Mat& weightsOut = *((Mat*)weights_mat_nativeObj);
        // Run detection, then marshal results back into the Java-side Mats.
        vector<Point> hits;
        vector<double> hitWeights;
        hogPtr->detect( image, hits, hitWeights );
        vector_Point_to_Mat( hits, locationsOut );
        vector_double_to_Mat( hitWeights, weightsOut );
        return;
    } catch(const std::exception &e) {
        throwJavaException(env, &e, method_name);
    } catch (...) {
        throwJavaException(env, 0, method_name);
    }
    return;
}
int main (int argc, const char * argv[]) { printf("Hello\n"); cout << "Hello Artyom" << endl << flush ; VideoCapture cap; cap.open("http://192.168.1.105:8080/?action=stream"); /* cap.set(CV_CAP_PROP_FRAME_WIDTH, 320); cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240); */ if (!cap.isOpened()) return -1; Mat img; /* VideoCapture cap1(1); cap.set(CV_CAP_PROP_FRAME_WIDTH, 320); cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240); if (!cap1.isOpened()) return -1; Mat img2; */ string Pos = ""; HOGDescriptor hog; hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector()); string posPoint = ""; string posRect =""; while (true) { cap >> img; // cap1 >> img2; if (!img.data) continue; vector<Rect> found, found_filtered; vector<Point> found1, found_filtered1; hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2); hog.detect(img, found1, 0, Size(8,8), Size(0,0)); size_t i, j; for (i = 0 ; i < found1.size(); i++){ Point tempPoint = found1[i]; Rect r ; if (tempPoint.x > 0 && tempPoint.y > 0) { r.x += tempPoint.x; r.y += tempPoint.y; r.width = 10; r.height = 10; rectangle(img, r.tl(), r.br(), cv::Scalar(255,0,0), 2); string x = to_string(r.x); string y = to_string(r.y); posPoint = "Pos: x:" + x+ " y: " + y; } } for (i=0; i<found.size(); i++) { Rect r = found[i]; for (j=0; j<found.size(); j++) if (j!=i && (r & found[j])==r) break; if (j==found.size()) found_filtered.push_back(r); } for (i=0; i<found_filtered.size(); i++) { Rect r = found_filtered[i]; r.x += cvRound(r.width*0.1); r.width = cvRound(r.width*0.8); r.y += cvRound(r.height*0.06); r.height = cvRound(r.height*0.9); // string x = to_string(r.x); string y = to_string(r.y); // posRect = "Pos: x:" + x+ " y: " + y; rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 2); } int number = 5; char text[255]; sprintf(text, "Score %d", (int)number); CvFont font; double hScale=1.0; double vScale=1.0; int lineWidth=1; cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale,vScale,0,lineWidth); IplImage* img1 = new IplImage(img); char* 
p = new char[posRect.length()+1]; memcpy(p, posRect.c_str(), posRect.length()+1); cvPutText(img1, p, cvPoint(200,400), &font, cvScalar(0,255,0)); char* p2 = new char[posPoint.length()+1]; memcpy(p2, posPoint.c_str(), posPoint.length()+1); cvPutText(img1, p2, cvPoint(200,430), &font, cvScalar(255,255,255)); imshow("video capture", img); // imshow("video capture2", img2); if (waitKey(1) >= 0) break; } //namedWindow("video capture", CV_WINDOW_AUTOSIZE); return 0; }