Predictor getTestPredictorLibSvm(){
    //Load sample structure
    SampleStructure sampleStructure = getTestSampleStructure();

    //Load training data
    Array<Sample> trainingData = getTrainingSampleData();

    //Make creator lambdas
    MlCreators creators;

    creators.rc = new RegressorCreator[1];
    creators.rc[0] = [](SampleStructure* st, Array<Sample> training, unsigned index){
        svm_parameter svm_param;
        svm_param.svm_type = EPSILON_SVR;  // default for frac.libsvr is EPSILON_SVR, not C_SVC
        svm_param.kernel_type = LINEAR;    // noto changed default from RBF
        svm_param.degree = 3;
        svm_param.gamma = 0;               // 1/num_features
        svm_param.coef0 = 0;
        svm_param.nu = 0.5;
        svm_param.cache_size = 100;
        svm_param.C = 1;
        svm_param.eps = 1e-3;
        svm_param.p = 0.0;                 // noto changed default from 0.1
        svm_param.shrinking = 1;
        svm_param.probability = 0;
        svm_param.nr_weight = 0;
        svm_param.weight_label = NULL;
        svm_param.weight = NULL;
        svm_param.timeout = 86400;         // noto

        Regressor* r = new SvmRegressor(*st, index, &svm_param);
        r->train(training);
        return r;
    };

    creators.cc = new ClassifierCreator[1];
    creators.cc[0] = [](SampleStructure* st, Array<Sample> training, unsigned index){
        Classifier* c = new WafflesDecisionTreeClassifier(*st, index, true);
        c->train(training);
        return c;
    };

    creators.bcc = new BinaryClassifierCreator[1];
    creators.bcc[0] = [](SampleStructure* st, Array<Sample> training, unsigned index){
        BinaryClassifier* r = new ConstantBinaryClassifier(false);
        r->train(training);
        return r;
    };

    //TODO build + use takeBest.

    Predictor p = Predictor(sampleStructure, trainingData, creators);
    return p;
}
void check_split(Index& idx, Classifier& c, double min_accuracy)
{
    // create splits
    std::vector<doc_id> docs = idx.docs();
    std::mt19937 gen(47);
    std::shuffle(docs.begin(), docs.end(), gen);
    size_t split_idx = docs.size() / 8;
    std::vector<doc_id> train_docs{docs.begin() + split_idx, docs.end()};
    std::vector<doc_id> test_docs{docs.begin(), docs.begin() + split_idx};

    // train and test
    c.train(train_docs);
    classify::confusion_matrix mtx = c.test(test_docs);
    ASSERT_GREATER(mtx.accuracy(), min_accuracy);
    ASSERT_LESS(mtx.accuracy(), 100.0);
}
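// A hypothetical usage sketch for check_split() above. The helpers
// make_test_index() and make_test_classifier() are assumptions, not part of
// the original test suite: build an index and a classifier, then require that
// a shuffled 7/8-train / 1/8-test split reaches at least 80% accuracy.
void run_split_check()
{
    auto idx = make_test_index();          // assumed helper returning an Index
    auto clf = make_test_classifier(idx);  // assumed helper returning a Classifier over idx
    check_split(idx, clf, 0.80);
}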
int testRange(std::vector<const typename Classifier<Val>::ExampleTrain*>& tcollect,
              int start_idx,
              int end_idx,
              Classifier<Val>& classifier)
{
    typename Classifier<Val>::ExamplesTrain test;
    typename Classifier<Val>::ExamplesTrain train;

    // Examples with indices in [start_idx, end_idx) form the test set;
    // everything else goes to the training set.
    typename std::vector<const typename Classifier<Val>::ExampleTrain*>::iterator it = tcollect.begin();
    for (int idx = 0; it != tcollect.end(); ++it, ++idx) {
        if (idx < start_idx)
            train.push_back(**it);
        else if (idx < end_idx)
            test.push_back(**it);
        else
            train.push_back(**it);
    }

    classifier.reset();
    classifier.train(train);
    return checkClassifier(test, classifier);
}
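// A minimal k-fold cross-validation driver, sketched as a companion to
// testRange() above. This is an assumption, not part of the original code:
// it presumes testRange() is a free function template over Val and that
// checkClassifier() returns a count that is meaningful to accumulate
// (e.g. misclassified examples). It slides the [start_idx, end_idx) test
// window across tcollect so that every example is tested exactly once.
template <class Val>
int crossValidate(std::vector<const typename Classifier<Val>::ExampleTrain*>& tcollect,
                  int num_folds,
                  Classifier<Val>& classifier)
{
    int total = 0;
    int fold_size = static_cast<int>(tcollect.size()) / num_folds;
    for (int fold = 0; fold < num_folds; ++fold) {
        int start_idx = fold * fold_size;
        // the last fold absorbs any remainder
        int end_idx = (fold == num_folds - 1) ? static_cast<int>(tcollect.size())
                                              : start_idx + fold_size;
        total += testRange(tcollect, start_idx, end_idx, classifier);
    }
    return total;
}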
Predictor getTestPredictorWaffles(){
    //Load sample structure
    SampleStructure sampleStructure = getTestSampleStructure();

    //Load training data
    Array<Sample> trainingData = getTrainingSampleData();

    //Make creator lambdas
    MlCreators creators;

    //Lambdas and side effects are best friends!
    creators.rc = new RegressorCreator[1];
    creators.rc[0] = [](SampleStructure* st, Array<Sample> training, unsigned index){
        //Regressor* r = new ConstantRegressor(0);
        Regressor* r = new WafflesDecisionTreeRegressor(*st, index);
        r->train(training);
        return r;
    };

    creators.cc = new ClassifierCreator[1];
    creators.cc[0] = [](SampleStructure* st, Array<Sample> training, unsigned index){
        Classifier* c = new WafflesDecisionTreeClassifier(*st, index, true); //TODO Why does this random give identical results to the default?
        c->train(training);
        return c;
    };

    creators.bcc = new BinaryClassifierCreator[1];
    creators.bcc[0] = [](SampleStructure* st, Array<Sample> training, unsigned index){
        BinaryClassifier* r = new ConstantBinaryClassifier(false);
        r->train(training);
        return r;
    };

    //TODO build + use takeBest.

    //std::cout << "Making predictor." << std::endl;
    Predictor p = Predictor(sampleStructure, trainingData, creators);
    //std::cout << "Made predictor." << std::endl;
    return p;
}
int main(int argc, char *argv[])
{
    char *configurationFile, *trainingFile, *featuresFile;
    bool bVerbose;
    char **args;
    Classifier classifier;

    // set defaults
    configurationFile = NULL;  // set using -c <filename>
    bVerbose = false;          // turn on with -v
    trainingFile = NULL;
    featuresFile = NULL;

    // check arguments
    args = argv + 1;
    while (argc-- > 2) {
        if (!strcmp(*args, "-c")) {
            argc--; args++;
            if (configurationFile != NULL) {
                usage();
                return -1;
            }
            configurationFile = *args;
        } else if (!strcmp(*args, "-f")) {
            argc--; args++;
            featuresFile = *args;
        } else if (!strcmp(*args, "-k")) {
            argc--; args++;
            sscanf(*args, "%i", &(classifier.num_clusters));
            printf("Set num_clusters to %d \n", classifier.num_clusters);
        } else if (!strcmp(*args, "-o")) {
            argc--; args++;
            int d;
            sscanf(*args, "%i", &d);
            classifier.max_others = d;
            printf("Set max_others to %d \n", d);
        } else if (!strcmp(*args, "-lk")) {
            classifier.kmeans_load = true;
        } else if (!strcmp(*args, "-sk")) {
            classifier.kmeans_save = true;
        } else if (!strcmp(*args, "-b")) {
            classifier.bayes.on = true;
        } else if (!strcmp(*args, "-s")) {
            classifier.svm.on = true;
        } else if (!strcmp(*args, "-r")) {
            classifier.rtrees.on = true;
        } else if (!strcmp(*args, "-t")) {
            classifier.btrees.on = true;
        } else if (!strcmp(*args, "--save")) {
            classifier.save_all = true;
        } else if (!strcmp(*args, "--load")) {
            classifier.load_all = true;
        } else if (!strcmp(*args, "-h")) {
            usage();
            return 0;
        } else if (!strcmp(*args, "-v")) {
            bVerbose = !bVerbose;
        } else {
            cerr << "ERROR: unrecognized option " << *args << endl;
            return -1;
        }
        args++;
    }

    if (argc != 1) {
        usage();
        exit(-1);
    }

    cout << endl;

    // load the training file list
    TTrainingFileList fileList;
    fileList = getTrainingFiles(*args, ".jpg");

    if (!classifier.extract(fileList)) {
        cerr << "ERROR: could not extract features" << endl;
        exit(-1);
    }

    // now train the classifier
    if (!classifier.train()) {
        cerr << "ERROR: could not train classifier" << endl;
        exit(-1);
    }

    // save classifier configuration
    // if (configurationFile != NULL) {
    //     if (!classifier.saveState(configurationFile)) {
    //         cerr << "ERROR: could not save classifier configuration" << endl;
    //         exit(-1);
    //     }
    // }

    return 0;
}
/**
 * Trains an SVM model using the InriaPerson training dataset.
 * Chooses a number of negative detections randomly from each image
 * and saves the freshly trained model to the hard disk.
 * @param modelFile path to the model file
 */
void trainSVM(string modelFile){
    vector<string> pos_examples;
    vector<string> neg_examples;

    Classifier classifier = Classifier();
    RNG random_index = RNG(time(NULL));
    Rect roi(Point(16, 16), Point(80, 144));

    Preprocessing::loadPathsByDirectory(TRAIN_POS_NORMALIZED, pos_examples);
    Preprocessing::loadPathsByDirectory(TRAIN_NEG_ORIGINAL, neg_examples);

    // positive examples: crop to the normalized person window and extract HOG features
    for (vector<string>::iterator posIt = pos_examples.begin(); posIt != pos_examples.end(); ++posIt) {
        OriginalImage img(*posIt);
        img.image = img.image(roi);
        FeatureExtraction::extractHOGFeatures(img);
        classifier.addPositiveExample(img.hog_features);
    }

    // negative examples: sample random sliding windows from each scale of the HOG pyramid
    for (vector<string>::iterator negIt = neg_examples.begin(); negIt != neg_examples.end(); ++negIt) {
        OriginalImage img(*negIt);
        FeatureExtraction::computeHOGPyramid(img);
        FeatureExtraction::computeSlidingWindows(img);
        for (std::vector<Image>::iterator scaleIt = img.lower_images.begin(); scaleIt != img.lower_images.end(); ++scaleIt) {
            FeatureExtraction::computeSlidingWindows(*scaleIt);
        }

        int count = 0;
        double howMuch = (double)(MAXRANDOMELEMENTS) / (double)(img.lower_images.size() + 1);
        for (int i = 0; i < (int)(howMuch); i++) {
            classifier.addNegativeExample(img.slidingWindows[random_index.uniform(0, img.slidingWindows.size())].hog_features);
            count++;
        }
        howMuch = howMuch + ((double)(MAXRANDOMELEMENTS) / (double)(img.lower_images.size() + 1)) - (int)howMuch;

        if (howMuch < MAXRANDOMELEMENTS)
            for (int current = 0; current < img.lower_images.size(); current++) {
                for (int i = 0; i < (int)(howMuch); i++) {
                    int y = random_index.uniform(0, img.lower_images[current].slidingWindows.size());
                    Mat hog_features = img.lower_images[current].slidingWindows[y].hog_features;
                    classifier.addNegativeExample(hog_features);
                    count++;
                }
                howMuch = howMuch + ((double)(MAXRANDOMELEMENTS) / (double)(img.lower_images.size() + 1)) - (int)howMuch;
            }

        // top up with random windows until MAXRANDOMELEMENTS negatives were taken from this image
        while (count < MAXRANDOMELEMENTS) {
            int x = random_index.uniform(0, img.lower_images.size());
            int y = random_index.uniform(0, img.lower_images[x].slidingWindows.size());
            Mat hog_features = img.lower_images[x].slidingWindows[y].hog_features;
            classifier.addNegativeExample(hog_features);
            count++;
        }
    }

    classifier.train(MODEL_STANDARD_FILE);
    classifier.loadModel(MODEL_STANDARD_FILE);

    // hard-negative mining: re-scan the negative images and add every false positive as a negative example
    for (vector<string>::iterator negIt = neg_examples.begin(); negIt != neg_examples.end(); ++negIt) {
        OriginalImage img(*negIt);
        FeatureExtraction::computeHOGPyramid(img);
        FeatureExtraction::computeSlidingWindows(img);
        for (vector<SlidingWindow>::iterator it = img.slidingWindows.begin(); it != img.slidingWindows.end(); ++it) {
            if (classifier.classify((*it).hog_features) > 0.0) {
                classifier.addNegativeExample((*it).hog_features);
            }
        }
        for (std::vector<Image>::iterator scaleIt = img.lower_images.begin(); scaleIt != img.lower_images.end(); ++scaleIt) {
            FeatureExtraction::computeSlidingWindows(*scaleIt);
            for (vector<SlidingWindow>::iterator it = (*scaleIt).slidingWindows.begin(); it != (*scaleIt).slidingWindows.end(); ++it) {
                if (classifier.classify((*it).hog_features) > 0.0) {
                    classifier.addNegativeExample((*it).hog_features);
                }
            }
        }
    }

    classifier.train(MODEL_HARD_EX_FILE);
}
void track3(ImageSource::InputDevice input, int numBaseClassifier, float overlap, float searchFactor,
            char* resultDir, Rect initBB, char* source = NULL)
{
    unsigned char *curFrame = NULL;
    int key;

    // choose the image source
    ImageSource *imageSequenceSource;
    switch (input) {
    case ImageSource::AVI:
        imageSequenceSource = new ImageSourceAVIFile(source);
        break;
    case ImageSource::DIRECTORY:
        imageSequenceSource = new ImageSourceDir(source);
        break;
    case ImageSource::USB:
        imageSequenceSource = new ImageSourceUSBCam();
        break;
    default:
        return;
    }

    ImageHandler* imageSequence = new ImageHandler(imageSequenceSource);
    imageSequence->getImage();
    imageSequence->viewImage("Tracking...", false);

    trackingRect = initBB;

    curFrame = imageSequence->getGrayImage();
    ImageRepresentation* curFrameRep = new ImageRepresentation(curFrame, imageSequence->getImageSize());
    Rect wholeImage;
    wholeImage = imageSequence->getImageSize();

    // Skip the beginning of the video
    for (int t = 0; t < 60; t++, imageSequence->getImage());

    IplImage *image;
    image = imageSequence->getIplImage();
    //cvWaitKey(0);

    printf("init tracker...");
    Classifier* tracker;
    tracker = new Classifier(image, trackingRect);
    printf(" done.\n");

    Size trackingRectSize;
    trackingRectSize = trackingRect;

    printf("start tracking (stop by pressing any key)...\n\n");

    // Initialize the detector
    Detector* detector;
    detector = new Detector(tracker->getClassifier());

    Rect trackedPatch = trackingRect;
    Rect validROI;
    validROI.upper = validROI.left = 0;
    validROI.height = image->height;
    validROI.width = image->width;

    key = (char)-1;
    while (key == (char)-1) {
        imageSequence->getImage();
        image = imageSequence->getIplImage();
        curFrame = imageSequence->getGrayImage();
        if (curFrame == NULL)
            break;

        // calculate the patches within the search region
        Patches *trackingPatches;
        Rect searchRegion;
        searchRegion = getTrackingROI(searchFactor, trackedPatch, validROI);
        trackingPatches = new PatchesRegularScan(searchRegion, wholeImage, trackingRectSize, overlap);

        curFrameRep->setNewImageAndROI(curFrame, searchRegion);

        detector->classifySmooth(curFrameRep, trackingPatches);

        trackedPatch = trackingPatches->getRect(detector->getPatchIdxOfBestDetection());
        if (detector->getNumDetections() <= 0) {
            printf("Lost...\n");
            break;
        }

        // Train the classifier
        tracker->train(image, trackedPatch);

        float alpha, confidence, eval;
        alpha = tracker->getSumAlphaClassifier();
        confidence = detector->getConfidenceOfBestDetection() / alpha;
        eval = tracker->classify(image, trackedPatch);
        printf("alpha: %5.3f confidence: %5.3f evalOficial: %5.3f ", alpha, confidence, eval);

        int orig = trackedPatch.upper;
        trackedPatch.upper -= 5;
        for (int i = 0; i < 10; i++) {
            eval = tracker->classify(image, trackedPatch);
            printf("%5.3f ", eval);
            trackedPatch.upper += 1;
            imageSequence->paintRectangle(trackedPatch, Color(0, 255, 0), 1);
        }
        trackedPatch.upper = orig;
        printf("\n");

        // display results
        imageSequence->paintRectangle(trackedPatch, Color(255, 0, 0), 5);
        imageSequence->viewImage("Tracking...", false);

        key = cvWaitKey(200);
    }

    // clean up
    delete tracker;
    delete imageSequenceSource;
    delete imageSequence;
    if (curFrame != NULL)
        delete[] curFrame;
    delete curFrameRep;
}
int main(void)
{
    double data[][4] = {
        {3.5, 5.3, 0.2, -1.2},
        {4.4, 2.2, 0.3, 0.4},
        {1.3, 0.5, 4.1, 3.2}
    };

    int labels[] = {1, 2, 3};

    Classifier* logReg = new LogisticRegression();
    logReg->epsilon = 1e-5;
    logReg->feedData<4>(data, 3, 4);
    logReg->feedLabels(labels, 3);

    // Get elapsed time in seconds
    tic();
    logReg->train();
    fprintf("Elapsed time: %.3f seconds.\n", toc());

    fprintf("W:\n");
    printMatrix(*logReg->W);
    fprintf("b:\n");
    printVector(logReg->b, 3);

    // double** dataTest = data;
    double dataTest[][4] = {
        {3.5, 5.3, 0.2, -1.2},
        {4.4, 2.2, 0.3, 0.4},
        {1.3, 0.5, 4.1, 3.2}
    };

    fprintf("Ground truth:\n");
    printMatrix(*logReg->Y);
    fprintf("Predicted probability matrix:\n");
    Matrix& Prob_pred = logReg->predictLabelScoreMatrix<4>(dataTest, 3, 4);
    disp(Prob_pred);
    fprintf("Predicted label matrix:\n");
    Matrix& Y_pred = logReg->predictLabelMatrix<4>(dataTest, 3, 4);
    printMatrix(Y_pred);

    int* pred_labels = logReg->predict(dataTest, 3, 4);
    Classifier::getAccuracy(pred_labels, labels, 3);

    tic();
    std::string trainDataFilePath = "heart_scale";
    logReg = new LogisticRegression();
    DataSet& dataSet = readDataSetFromFile(trainDataFilePath);
    logReg->feedData(*dataSet.X);
    logReg->feedLabels(dataSet.Y, dataSet.nExample);
    logReg->train();

    Matrix& XTest = *dataSet.X;
    pred_labels = logReg->predict(XTest);
    int nt = XTest.getRowDimension();
    Classifier::getAccuracy(pred_labels, logReg->labels, nt);
    fprintf("Elapsed time: %.3f seconds.\n", toc());

    return EXIT_SUCCESS;
}