/**Predict data from 'data_file' using model from 'model_file' and save predictions to 'prediction_file' */ void PredictData(const string& data_file, const string& model_file, const string& prediction_file, bool sse_on) { // List of image file names and its labels TFileList file_list; // Structure of images and its labels TDataSet data_set; // Structure of features of images and its labels TFeatures features; // List of image labels TLabels labels; // Load list of image file names and its labels LoadFileList(data_file, &file_list); // Load images LoadImages(file_list, &data_set); // Extract features from images ExtractFeatures(data_set, &features, sse_on); // Classifier TClassifier classifier = TClassifier(TClassifierParams()); // Trained model TModel model; // Load model from file model.Load(model_file); // Predict images by its features using 'model' and store predictions // to 'labels' classifier.Predict(features, model, &labels); // Save predictions SavePredictions(file_list, labels, prediction_file); // Clear dataset structure ClearDataset(&data_set); }
/** Train SVM classifier using data from 'data_file' and save trained model to 'model_file' */ void TrainClassifier(const string& data_file, const string& model_file, bool sse_on) { // List of image file names and its labels TFileList file_list; // Structure of images and its labels TDataSet data_set; // Structure of features of images and its labels TFeatures features; // Model which would be trained TModel model; // Parameters of classifier TClassifierParams params; // Load list of image file names and its labels LoadFileList(data_file, &file_list); // Load images LoadImages(file_list, &data_set); // Extract features from images ExtractFeatures(data_set, &features, sse_on); // PLACE YOUR CODE HERE // You can change parameters of classifier here params.C = 0.01; TClassifier classifier(params); // Train classifier classifier.Train(features, &model); // Save model to file model.Save(model_file); // Clear dataset structure ClearDataset(&data_set); }
/** Load the model from 'model_file', search for the optimal detection
    threshold on 'images_list', store that threshold back into the model,
    re-save the model file, and return the updated model. */
HOGFeatureClassifier::TModel OptimizeThresholdsInModel(const string &images_list, const string& model_file, RecognitionStatistics &stat) {
    TModel model;
    model.Load(model_file);

    // Calibrate the decision threshold against the labelled image list.
    const float best_threshold =
        ImageRecognition::FindOptimalThresholdForModel(images_list, model, stat);
    model.setModelThreshold(best_threshold);

    // Overwrite the model file so the threshold survives reloads.
    model.Save(model_file);
    return model;
}
/** Run the sliding-window HOG detector over 'image' with 'model' and append
    every maximum-response window to 'rects'. Timing is printed to stdout when
    stat.flOutputTime is set.
    Fix: removed a local 'sizes' vector that was filled from the 'sizes_'
    array but never read (dead code and a useless allocation), along with the
    stale commented-out preprocessing-timing block. */
void GetRectsFromImage(vector<ImageRecognition::SlidingRect> &rects, const Mat &image, const TModel &model, RecognitionStatistics &stat) {
    using namespace ImageRecognition;

    // Preprocess the input image before fragmentation.
    ImageRecognition::preprocessing prep(stat);
    Mat prepImage;
    clock_t begin_time = clock();
    prep.do_prep(image, prepImage);

    // Slice the image into candidate windows and score each with the
    // HOG response function backed by 'model'.
    ImageRecognition::SlidingWindowFragmentation fragmentation(image, prepImage, model.GetContext(), stat);
    HOGFeatureClassifier::HoGResponseFunctor response(stat);
    response.InitModel(&model);
    fragmentation.FindMaximaWindows(rects, response);

    clock_t end_time = clock();
    if (stat.flOutputTime)
        cout << "Recognition Time: " << (float(end_time - begin_time) / 1000.0f) << endl;
}
/** Convenience overload: load the model from 'model_filename' and delegate
    to the ResponseImage overload that takes a TModel. */
void ResponseImage(vector<ImageRecognition::SlidingRect> &rects, const Mat &image, const string &model_filename, RecognitionStatistics &stat) {
    TModel loaded_model;
    loaded_model.Load(model_filename);
    ResponseImage(rects, image, loaded_model, stat);
}
/** Detect windows in 'image' with 'model' and mark each returned rect as a
    false detection when its response value falls below the model's
    calibrated threshold.
    Fix: replaced the signed/unsigned loop (`int i < rects.size()`) with a
    range-for, and hoisted the loop-invariant getModelThreshold() call out of
    the loop. */
void ResponseImage(vector<ImageRecognition::SlidingRect> &rects, const Mat &image, const TModel &model, RecognitionStatistics &stat) {
    GetRectsFromImage(rects, image, model, stat);

    // Any window scoring below the calibrated threshold is rejected.
    const auto threshold = model.getModelThreshold();
    for (auto &rect : rects)
        rect.falseDetection = (rect.value < threshold);
}
/** Compute a real-to-complex FFT of the input array named 'nm_in' and write
    the spectrum into up to three output arrays (looked up by name):
    'nm_omega' receives angular frequencies, 'nm_a' amplitudes (or real parts
    when 'cmpl' is true), 'nm_phi' phases (or imaginary parts when 'cmpl').
    Frequencies above 'ome_max' are dropped (ome_max <= 0 means no limit).
    Returns the number of spectral points written, or 0 on failure
    (missing input array, too few samples, or no ancestor model). */
int ContOut::fftx( const QString &nm_in, const QString &nm_omega, const QString &nm_a, const QString &nm_phi, double ome_max, bool cmpl )
{
  int n = 0;
  // Input array must exist and hold at least 2 samples.
  TOutArr *in = getObjT<TOutArr*>( nm_in );
  if( !in ) { return 0; }
  n = in->getDataD( "n", 0 );
  if( n < 2 ) { return 0; }
  // The ancestor model supplies the sampling step 'tdt'.
  TModel *model = getAncestorT<TModel>();
  if( !model ) { qWarning() << "not found model in" << getFullName() << WHE; return 0; }
  double tdt = model->getDataD( "tdt", 1.0 );
  // output arrays may not exist, but must be special:
  // each optional output is ignored (set to nullptr) unless its type is
  // outSpec, so ordinary arrays are never clobbered with spectrum data.
  TOutArr *o_omega = getObjT<TOutArr*>( nm_omega );
  if( o_omega ) {
    int tp = o_omega->getDataD( "type", 0 );
    if( tp != TOutArr::OutArrType::outSpec ) { o_omega = nullptr; }
  }
  TOutArr *o_a = getObjT<TOutArr*>( nm_a );
  if( o_a ) {
    int tp = o_a->getDataD( "type", 0 );
    if( tp != TOutArr::OutArrType::outSpec ) { o_a = nullptr; }
  }
  TOutArr *o_phi = getObjT<TOutArr*>( nm_phi );
  if( o_phi ) {
    int tp = o_phi->getDataD( "type", 0 );
    if( tp != TOutArr::OutArrType::outSpec ) { o_phi = nullptr; }
  }
  // Non-positive limit means "no frequency cut-off".
  if( ome_max <= 0.0 ) { ome_max = DMAX; }
  dvector v = *(in->getArray()); // copy, as fft may disturb data
  double *pv = v.data();
  // Real-to-complex FFT of n samples yields n/2+1 complex bins;
  // the buffer is over-allocated by 2 entries (safety margin, presumably
  // to match FFTW's in-place padding conventions — TODO confirm).
  int o_n = 1 + n/2;
  fftw_complex *out = (fftw_complex*) fftw_malloc( (2+o_n) * sizeof( fftw_complex ) );
  fftw_plan plan = fftw_plan_dft_r2c_1d( n, pv, out, FFTW_ESTIMATE );
  fftw_execute( plan );
  fftw_destroy_plan( plan );
  // Number of bins below ome_max: bin i corresponds to omega = 2*pi*i/(tdt*n),
  // clamped to the number of bins actually computed.
  int i_m = (int)( n * tdt * ome_max / ( 2*M_PI ) );
  if( i_m > o_n ) { i_m = o_n; }
  // Reset and size each requested output array.
  if( o_omega ) { o_omega->reset(); o_omega->alloc( i_m ); }
  if( o_a ) { o_a->reset(); o_a->alloc( i_m ); }
  if( o_phi ) { o_phi->reset(); o_phi->alloc( i_m ); }
  double scl = 2.0 / double(n); // common scale
  for( int i=0; i<i_m ; ++i ) {
    double ome = 2*M_PI*i/(tdt*n);
    if( o_omega ) { o_omega->add( ome ); }
    if( o_a ) {
      if( cmpl ) {
        // complex mode: raw real part, scaled
        o_a->add( scl * out[i][0] );
      } else {
        // amplitude mode: scaled magnitude of the complex bin
        o_a->add( scl * hypot( out[i][0], out[i][1] ) );
      }
    }
    if( o_phi ) {
      if( cmpl ) {
        // complex mode: raw imaginary part, scaled
        o_phi->add( scl * out[i][1] );
      } else {
        // phase mode: argument of the complex bin (unscaled)
        o_phi->add( atan2( out[i][1], out[i][0] ) );
      }
    }
  }
  fftw_free( out );
  out = nullptr;
  return i_m;
}