void App() { long t1; (void) time(&t1); seedMT(t1); float em_converged = 1e-4; int em_max_iter = 20; int em_estimate_alpha = 1; //1 indicate estimate alpha and 0 use given value int var_max_iter = 30; double var_converged = 1e-6; double initial_alpha = 0.1; int n_topic = 30; LDA lda; lda.Init(em_converged, em_max_iter, em_estimate_alpha, var_max_iter, var_converged, initial_alpha, n_topic); Corpus cor; //Str data = "../../data/ap.dat"; Str data = "lda_data"; cor.LoadData(data); Corpus train; Corpus test; double p = 0.8; SplitData(cor, p, &train, &test); Str type = "seeded"; LdaModel m; lda.RunEM(type, train, test, &m); LOG(INFO) << m.alpha; VVReal gamma; VVVReal phi; lda.Infer(test, m, &gamma, &phi); WriteStrToFile(Join(gamma, " ", "\n"), "gamma"); WriteStrToFile(Join(phi, " ", "\n", "\n\n"), "phi"); }
void LdaApp() { long t1; (void) time(&t1); seedMT(t1); float em_converged = 1e-4; int em_max_iter = FLAGS_em_iterate; int em_estimate_alpha = 1; //1 indicate estimate alpha and 0 use given value int var_max_iter = FLAGS_var_iterate; double var_converged = 1e-6; double initial_alpha = FLAGS_alpha; int topic = FLAGS_topic_num; Corpus train; Corpus test; train.LoadData(FLAGS_cor_train); test.LoadData(FLAGS_cor_test); LOG(INFO) << train.Len()<< " " << test.Len(); LdaModel m; LDA lda; lda.Init(em_converged, em_max_iter, em_estimate_alpha, var_max_iter, var_converged, initial_alpha, topic); Str type = "seeded"; lda.RunEM(type, train, test, &m); VVReal gamma; VVVReal phi; lda.Infer(test, m, &gamma, &phi); WriteStrToFile(Join(gamma, " ", "\n"), "./model/gamma"); WriteStrToFile(Join(m.log_prob_w, topic, train.num_terms), "./model/beta"); WriteStrToFile(Join(phi, " ", "\n", "\n\n"), "./model/phi"); }
// Toy OpenCV pipeline: PCA dimensionality reduction followed by LDA.
// `a` supplies 64 floats forming an 8x8 Mat -- 8 row-samples with 8
// features each (cf. CV_PCA_DATA_AS_ROW).  PCA keeps 7 components
// (one fewer than the number of samples), LDA is then fit on the
// PCA-projected data against the 8 two-class labels, and the LDA
// projection of the training data is printed.  The commented lines
// show intermediate results (raw data, PCA projection, reconstruction).
int main(int argc, char** argv) { float a[]={ 0.234400724097, 0.445210153051, 0.420883079381, 0.0584111370634, 0.930917795284, 0.463946380108, 0.827477442854, 0.195052690912, 0.224843236267, 0.011674592046, 0.778465234345, 0.795119566607, 0.834330061452, 0.250878254601, 0.907848368295, 0.159768191396, 0.359447753375, 0.694377176768, 0.323279688498, 0.590454463022, 0.32053508251, 0.25926247011, 0.473382632749, 0.680857359827, 0.871843303433, 0.347550207092, 0.807721675262, 0.51342440135, 0.633862634367, 0.588847708996, 0.604920986251, 0.9485023141, 0.511286105241, 0.780677021392, 0.346168472115, 0.408572254219, 0.977881372787, 0.994457177414, 0.553713182589, 0.181657338197, 0.188679332574, 0.138351555791, 0.549762090688, 0.763422732648, 0.270469815182, 0.368094710756, 0.28652717945, 0.344130955251, 0.808703681865, 0.48242375244, 0.0961390490465, 0.585178232015, 0.0947071702324, 0.00663925147531, 0.409282147388, 0.865532591897, 0.233760414088, 0.399258033215, 0.547551739688, 0.078241816204, 0.672857401346, 0.083814529556, 0.68575517509, 0.213487218459 }; vector<int> labels = {1,1,1,1,1,2,1,2}; Mat data(Size(8,8), CV_32F, a); //cout << data << endl; PCA pca; LDA lda; pca(data, Mat(), CV_PCA_DATA_AS_ROW, 7); //cout << pca.project(data) << endl; lda.compute(pca.project(data), labels); cout << lda.project(pca.project(data)) << endl; //cout << lda.reconstruct(lda.project(pca.project(data))) << endl; return 0; }
void test_lda() { printf("[test lda]\n"); double feats[3*10] = {1,2,3,4,5,6,7,8,9,3,5,7,5,3,7, 11,12,13,14,15,16,17,18,19, 13,15,17,15,13,17}; int lbls[10] = {-1, -1, 1, 1, -1, 1, 1, 1, -1, 1}; //HFMatrix<double> features(feats, 3, 10); Loader loader("data/hello_matrix"); HFMatrix<double> features(loader); //HFVector<int> labels(lbls, 10); Loader loader2("data/hello_label"); HFVector<int> labels(loader2); //Saver saver("data/hello_label"); //labels.save(saver); //Saver saver("data/hello_matrix"); //features.save(saver); LDA lda; lda.train(&features, &labels); }
int main(int argc, char** argv) { gflags::SetUsageMessage("Usage : ./warplda [ flags... ]"); gflags::ParseCommandLineFlags(&argc, &argv, true); if ((FLAGS_inference || FLAGS_estimate) == false) FLAGS_estimate = true; if (!FLAGS_z.empty()) FLAGS_dumpz = true; SetIfEmpty(FLAGS_bin, FLAGS_prefix + ".bin"); SetIfEmpty(FLAGS_model, FLAGS_prefix + ".model"); SetIfEmpty(FLAGS_info, FLAGS_prefix + ".info"); SetIfEmpty(FLAGS_vocab, FLAGS_prefix + ".vocab"); SetIfEmpty(FLAGS_topics, FLAGS_prefix + ".topics"); LDA *lda = new WarpLDA<1>(); lda->loadBinary(FLAGS_bin); if (FLAGS_estimate) { lda->estimate(FLAGS_k, FLAGS_alpha / FLAGS_k, FLAGS_beta, FLAGS_niter, FLAGS_perplexity); if (FLAGS_dumpmodel) { std::cout << "Dump model " << FLAGS_model << std::endl; lda->storeModel(FLAGS_model); } if (FLAGS_writeinfo) { std::cout << "Write Info " << FLAGS_info << " ntop " << FLAGS_ntop << std::endl; lda->writeInfo(FLAGS_vocab, FLAGS_info, FLAGS_ntop); } if (FLAGS_dumpz) { SetIfEmpty(FLAGS_z, FLAGS_prefix + ".z.estimate"); std::cout << "Dump Z " << FLAGS_z << std::endl; lda->storeZ(FLAGS_z); } } else if(FLAGS_inference) { lda->loadModel(FLAGS_model); lda->inference(FLAGS_niter, FLAGS_perplexity); if (FLAGS_dumpz) { SetIfEmpty(FLAGS_z, FLAGS_prefix + ".z.inference"); std::cout << "Dump Z " << FLAGS_z << std::endl; lda->storeZ(FLAGS_z); } } return 0; }
// Photo/sketch face matching experiment ("kernel-drs"): extracts patch
// descriptors from photos and sketches, trains a PCA+LDA projection on
// 200 random feature "bags", and writes chi-square / L2 / cosine
// distance matrices between testing sketches and photos to XML files.
//   argv[1]: photo directory   argv[2]: sketch directory
// NOTE(review): all per-image Mats are heap-allocated and never freed;
// tolerable only because this is a one-shot tool.
int main(int argc, char** argv) {
    string filter = "Gaussian";
    string descriptor = "HAOG";
    string database = "CUFSF";
    uint count = 0;   // run id: seeds the shuffle and tags the output files

    vector<string> extraPhotos, photos, sketches;
    loadImages(argv[1], photos);
    loadImages(argv[2], sketches);
    //loadImages(argv[7], extraPhotos);

    uint nPhotos = photos.size(), nSketches = sketches.size(), nExtra = extraPhotos.size();
    uint nTraining = 2*nPhotos/3;   // 2/3 of the pairs train, the rest test

    cout << "Read " << nSketches << " sketches." << endl;
    cout << "Read " << nPhotos + nExtra << " photos." << endl;

    vector<Mat*> sketchesDescriptors(nSketches), photosDescriptors(nPhotos), extraDescriptors(nExtra);
    Mat img, temp;
    int size=32, delta=16;   // patch size and stride for descriptor extraction

    // Parallel descriptor extraction; extractDescriptors is serialized
    // under `critical` -- presumably not thread-safe (TODO confirm).
    #pragma omp parallel for private(img, temp)
    for(uint i=0; i<nSketches; i++){
        img = imread(sketches[i],0);   // 0 = load grayscale
        sketchesDescriptors[i] = new Mat();
        #pragma omp critical
        temp = extractDescriptors(img, size, delta, filter, descriptor);
        *(sketchesDescriptors[i]) = temp.clone();
    }
    #pragma omp parallel for private(img, temp)
    for(uint i=0; i<nPhotos; i++){
        img = imread(photos[i],0);
        photosDescriptors[i] = new Mat();
        #pragma omp critical
        temp = extractDescriptors(img, size, delta, filter, descriptor);
        *(photosDescriptors[i]) = temp.clone();
    }
    #pragma omp parallel for private(img, temp)
    for(uint i=0; i<nExtra; i++){
        img = imread(extraPhotos[i],0);
        extraDescriptors[i] = new Mat();
        #pragma omp critical
        temp = extractDescriptors(img, size, delta, filter, descriptor);
        *(extraDescriptors[i]) = temp.clone();
    }

    // Shuffle sketches and photos with the SAME seed so the i-th sketch
    // still corresponds to the i-th photo afterwards.
    auto seed = unsigned(count);
    srand(seed);
    random_shuffle(sketchesDescriptors.begin(), sketchesDescriptors.end());
    srand(seed);
    random_shuffle(photosDescriptors.begin(), photosDescriptors.end());

    //training
    vector<Mat*> trainingSketchesDescriptors, trainingPhotosDescriptors;
    trainingSketchesDescriptors.insert(trainingSketchesDescriptors.end(), sketchesDescriptors.begin(), sketchesDescriptors.begin()+nTraining);
    trainingPhotosDescriptors.insert(trainingPhotosDescriptors.end(), photosDescriptors.begin(), photosDescriptors.begin()+nTraining);

    //testing -- extra photos enlarge the gallery side only
    vector<Mat*> testingSketchesDescriptors, testingPhotosDescriptors;
    testingSketchesDescriptors.insert(testingSketchesDescriptors.end(), sketchesDescriptors.begin()+nTraining, sketchesDescriptors.end());
    testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), photosDescriptors.begin()+nTraining, photosDescriptors.end());
    testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), extraDescriptors.begin(), extraDescriptors.end());

    PCA pca;
    LDA lda;
    vector<int> labels;
    uint nTestingSketches = testingSketchesDescriptors.size(), nTestingPhotos = testingPhotosDescriptors.size();

    // One class label per training identity; duplicated because X below
    // holds the sketch columns first, then the photo columns.
    for(uint i=0; i<nTraining; i++){
        labels.push_back(i);
    }
    // NOTE(review): inserting a range of *this into itself is UB if the
    // insert reallocates (iterators into the same vector) -- confirm or
    // copy through a temporary.
    labels.insert(labels.end(),labels.begin(),labels.end());

    //bags
    vector<Mat*> testingSketchesDescriptorsBag(nTestingSketches), testingPhotosDescriptorsBag(nTestingPhotos);
    for(int b=0; b<200; b++){
        // Random feature bag: keep ~10% of the 154 descriptor slots.
        vector<int> bag_indexes = gen_bag(154, 0.1);
        uint dim = (bag(*(trainingPhotosDescriptors[0]), bag_indexes, 154)).total();

        // X = [bagged sketches | bagged photos], one column per sample.
        Mat X(dim, 2*nTraining, CV_32F);
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTraining; i++){
            temp = *(trainingSketchesDescriptors[i]);
            temp = bag(temp, bag_indexes, 154);
            temp.copyTo(X.col(i));
        }
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTraining; i++){
            temp = *(trainingPhotosDescriptors[i]);
            temp = bag(temp, bag_indexes, 154);
            temp.copyTo(X.col(i+nTraining));
        }

        Mat Xs = X(Range::all(), Range(0,nTraining));             // sketch columns (view into X)
        Mat Xp = X(Range::all(), Range(nTraining,2*nTraining));   // photo columns (view into X)

        Mat meanX = Mat::zeros(dim, 1, CV_32F), instance;
        Mat meanXs = Mat::zeros(dim, 1, CV_32F);
        Mat meanXp = Mat::zeros(dim, 1, CV_32F);
        // calculate sums
        for (int i = 0; i < X.cols; i++) {
            instance = X.col(i);
            add(meanX, instance, meanX);
        }
        for (int i = 0; i < Xs.cols; i++) {
            instance = Xs.col(i);
            add(meanXs, instance, meanXs);
        }
        for (int i = 0; i < Xp.cols; i++) {
            instance = Xp.col(i);
            add(meanXp, instance, meanXp);
        }
        // calculate total mean
        meanX.convertTo(meanX, CV_32F, 1.0/static_cast<double>(X.cols));
        meanXs.convertTo(meanXs, CV_32F, 1.0/static_cast<double>(Xs.cols));
        meanXp.convertTo(meanXp, CV_32F, 1.0/static_cast<double>(Xp.cols));

        // subtract the mean of matrix
        // NOTE(review): Xs/Xp are views of X, so their columns get both
        // meanX and meanXs/meanXp subtracted -- confirm this modality
        // double-centering is intended.
        for(int i=0; i<X.cols; i++) {
            Mat c_i = X.col(i);
            subtract(c_i, meanX.reshape(1,dim), c_i);
        }
        for(int i=0; i<Xs.cols; i++) {
            Mat c_i = Xs.col(i);
            subtract(c_i, meanXs.reshape(1,dim), c_i);
        }
        for(int i=0; i<Xp.cols; i++) {
            Mat c_i = Xp.col(i);
            subtract(c_i, meanXp.reshape(1,dim), c_i);
        }

        // PCA: fixed nTraining-1 components when dimensionality allows,
        // otherwise keep 99% of the variance.
        if(meanX.total() >= nTraining) pca(X, Mat(), CV_PCA_DATA_AS_COL, nTraining-1);
        else pca.computeVar(X, Mat(), CV_PCA_DATA_AS_COL, .99);
        Mat W1 = pca.eigenvectors.t();
        Mat ldaData = (W1.t()*X).t();   // samples as rows for LDA
        lda.compute(ldaData, labels);
        Mat W2 = lda.eigenvectors();
        W2.convertTo(W2, CV_32F);       // match X's float depth for the products below
        Mat projectionMatrix = (W2.t()*W1.t()).t();   // combined PCA+LDA projection

        //testing -- project each test descriptor through this bag's
        // subspace and stack it below the previous bags' features.
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTestingSketches; i++){
            temp = *(testingSketchesDescriptors[i]);
            temp = bag(temp, bag_indexes, 154);
            temp = projectionMatrix.t()*(temp-meanX);
            if(b==0){
                testingSketchesDescriptorsBag[i] = new Mat();
                *(testingSketchesDescriptorsBag[i]) = temp.clone();
            }
            else{
                vconcat(*(testingSketchesDescriptorsBag[i]), temp, *(testingSketchesDescriptorsBag[i]));
            }
        }
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTestingPhotos; i++){
            temp = *(testingPhotosDescriptors[i]);
            temp = bag(temp, bag_indexes, 154);
            temp = projectionMatrix.t()*(temp-meanX);
            if(b==0){
                testingPhotosDescriptorsBag[i] = new Mat();
                *(testingPhotosDescriptorsBag[i]) = temp.clone();
            }
            else{
                vconcat(*(testingPhotosDescriptorsBag[i]), temp, *(testingPhotosDescriptorsBag[i]));
            }
        }
    }

    // Pairwise sketch-vs-photo distances over the concatenated bag features.
    Mat distancesChi = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
    Mat distancesL2 = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
    Mat distancesCosine = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
    #pragma omp parallel for
    for(uint i=0; i<nTestingSketches; i++){
        for(uint j=0; j<nTestingPhotos; j++){
            distancesChi.at<double>(i,j) = chiSquareDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j]));
            distancesL2.at<double>(i,j) = norm(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j]));
            distancesCosine.at<double>(i,j) = abs(1-cosineDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j])));
        }
    }

    // One XML file per distance metric, tagged with the run parameters.
    string file1name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("chi") + to_string(count) + string(".xml");
    string file2name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("l2") + to_string(count) + string(".xml");
    string file3name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("cosine") + to_string(count) + string(".xml");
    FileStorage file1(file1name, FileStorage::WRITE);
    FileStorage file2(file2name, FileStorage::WRITE);
    FileStorage file3(file3name, FileStorage::WRITE);
    file1 << "distanceMatrix" << distancesChi;
    file2 << "distanceMatrix" << distancesL2;
    file3 << "distanceMatrix" << distancesCosine;
    file1.release();
    file2.release();
    file3.release();
    return 0;
}
// Photo/sketch matching with a kernel prototype space ("kernel-prs"):
// training is split in two halves -- half 1 builds a Kernel prototype
// projection, half 2 trains PCA+LDA in that space -- repeated over 30
// random feature bags; cosine distances between the concatenated bag
// features of testing sketches and photos are written to XML.
//   argv[5]: photos   argv[6]: sketches   argv[7]: extra gallery photos
// NOTE(review): per-image Mats (including the per-bag *Temp Mats) are
// heap-allocated and never freed -- the Temp ones leak on every bag
// iteration; tolerable only for a one-shot tool.
int main(int argc, char** argv) {
    string filter = "Gaussian";
    string descriptor = "SIFT";
    string database = "Forensic-extra";
    uint count = 1;   // run id: seeds the shuffle and tags the output file

    vector<string> extraPhotos, photos, sketches;
    loadImages(argv[5], photos);
    loadImages(argv[6], sketches);
    loadImages(argv[7], extraPhotos);

    uint nPhotos = photos.size(), nSketches = sketches.size(), nExtra = extraPhotos.size();
    uint nTraining = 2*nPhotos/3;   // 2/3 train, 1/3 test

    cout << "Read " << nSketches << " sketches." << endl;
    cout << "Read " << nPhotos + nExtra << " photos." << endl;

    vector<Mat*> sketchesDescriptors(nSketches), photosDescriptors(nPhotos), extraDescriptors(nExtra);
    Mat img, temp;
    int size=32, delta=16;   // patch size and stride for descriptor extraction

    // Parallel descriptor extraction; extractDescriptors is serialized
    // under `critical` -- presumably not thread-safe (TODO confirm).
    #pragma omp parallel for private(img, temp)
    for(uint i=0; i<nSketches; i++){
        img = imread(sketches[i],0);   // 0 = load grayscale
        sketchesDescriptors[i] = new Mat();
        #pragma omp critical
        temp = extractDescriptors(img, size, delta, filter, descriptor);
        *(sketchesDescriptors[i]) = temp.clone();
    }
    #pragma omp parallel for private(img, temp)
    for(uint i=0; i<nPhotos; i++){
        img = imread(photos[i],0);
        photosDescriptors[i] = new Mat();
        #pragma omp critical
        temp = extractDescriptors(img, size, delta, filter, descriptor);
        *(photosDescriptors[i]) = temp.clone();
    }
    #pragma omp parallel for private(img, temp)
    for(uint i=0; i<nExtra; i++){
        img = imread(extraPhotos[i],0);
        extraDescriptors[i] = new Mat();
        #pragma omp critical
        temp = extractDescriptors(img, size, delta, filter, descriptor);
        *(extraDescriptors[i]) = temp.clone();
    }

    // Same-seed shuffles keep sketch/photo pairs aligned.
    auto seed = unsigned(count);
    srand(seed);
    random_shuffle(sketchesDescriptors.begin(), sketchesDescriptors.end());
    srand(seed);
    random_shuffle(photosDescriptors.begin(), photosDescriptors.end());

    //training -- half 1 feeds the Kernel prototypes, half 2 the PCA+LDA
    vector<Mat*> trainingSketchesDescriptors1, trainingPhotosDescriptors1, trainingSketchesDescriptors2, trainingPhotosDescriptors2;
    trainingSketchesDescriptors1.insert(trainingSketchesDescriptors1.end(), sketchesDescriptors.begin(), sketchesDescriptors.begin()+nTraining/2);
    trainingPhotosDescriptors1.insert(trainingPhotosDescriptors1.end(), photosDescriptors.begin(), photosDescriptors.begin()+nTraining/2);
    trainingSketchesDescriptors2.insert(trainingSketchesDescriptors2.end(), sketchesDescriptors.begin()+nTraining/2, sketchesDescriptors.begin()+nTraining);
    trainingPhotosDescriptors2.insert(trainingPhotosDescriptors2.end(), photosDescriptors.begin()+nTraining/2, photosDescriptors.begin()+nTraining);
    uint nTraining1 = trainingPhotosDescriptors1.size(), nTraining2 = trainingPhotosDescriptors2.size();

    //testing -- extra photos enlarge the gallery side only
    vector<Mat*> testingSketchesDescriptors, testingPhotosDescriptors;
    testingSketchesDescriptors.insert(testingSketchesDescriptors.end(), sketchesDescriptors.begin()+nTraining, sketchesDescriptors.end());
    testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), photosDescriptors.begin()+nTraining, photosDescriptors.end());
    testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), extraDescriptors.begin(), extraDescriptors.end());
    uint nTestingSketches = testingSketchesDescriptors.size(), nTestingPhotos = testingPhotosDescriptors.size();

    PCA pca;
    LDA lda;
    vector<int> labels;
    // One class per half-2 identity; duplicated because X below holds
    // sketch columns first, then photo columns.
    for(uint i=0; i<nTraining2; i++){
        labels.push_back(i);
    }
    // NOTE(review): inserting a range of *this into itself is UB if the
    // insert reallocates -- confirm or copy through a temporary.
    labels.insert(labels.end(),labels.begin(),labels.end());

    //bags
    vector<Mat*> testingSketchesDescriptorsBag(nTestingSketches), testingPhotosDescriptorsBag(nTestingPhotos), trainingPhotosDescriptors1Temp(nTraining1), trainingSketchesDescriptors1Temp(nTraining1);
    for(int b=0; b<30; b++){
        // Random feature bag: keep ~10% of the 154 descriptor slots.
        vector<int> bag_indexes = gen_bag(154, 0.1);

        // Bag the half-1 descriptors that define the kernel prototypes.
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTraining1; i++){
            temp = *(trainingSketchesDescriptors1[i]);
            temp = bag(temp, bag_indexes, 154);
            trainingSketchesDescriptors1Temp[i] = new Mat();
            *(trainingSketchesDescriptors1Temp[i]) = temp.clone();
        }
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTraining1; i++){
            temp = *(trainingPhotosDescriptors1[i]);
            temp = bag(temp, bag_indexes, 154);
            trainingPhotosDescriptors1Temp[i] = new Mat();
            *(trainingPhotosDescriptors1Temp[i]) = temp.clone();
        }

        // Kernel prototype space built from the bagged half-1 pairs.
        Kernel k(trainingPhotosDescriptors1Temp, trainingSketchesDescriptors1Temp);
        k.compute();
        uint dim = (k.projectGallery(bag(*(trainingPhotosDescriptors1[0]), bag_indexes, 154))).total();

        // X = [projected half-2 sketches | projected half-2 photos],
        // one column per sample; photos go through the gallery side of
        // the kernel, sketches through the probe side.
        Mat X(dim, 2*nTraining2, CV_32F);
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTraining2; i++){
            temp = *(trainingSketchesDescriptors2[i]);
            temp = bag(temp, bag_indexes, 154);
            temp = k.projectProbe(temp);
            temp.copyTo(X.col(i));
        }
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTraining2; i++){
            temp = *(trainingPhotosDescriptors2[i]);
            temp = bag(temp, bag_indexes, 154);
            temp = k.projectGallery(temp);
            temp.copyTo(X.col(i+nTraining2));
        }

        Mat meanX = Mat::zeros(dim, 1, CV_32F), instance;
        // calculate sums
        for (int i = 0; i < X.cols; i++) {
            instance = X.col(i);
            add(meanX, instance, meanX);
        }
        // calculate total mean
        meanX.convertTo(meanX, CV_32F, 1.0/static_cast<double>(X.cols));
        // subtract the mean of matrix (centers X in place)
        for(int i=0; i<X.cols; i++) {
            Mat c_i = X.col(i);
            subtract(c_i, meanX.reshape(1,dim), c_i);
        }

        // PCA keeping 99% of the variance, then LDA in the reduced space.
        pca.computeVar(X, Mat(), CV_PCA_DATA_AS_COL, .99);
        Mat W1 = pca.eigenvectors.t();
        Mat ldaData = (W1.t()*X).t();   // samples as rows for LDA
        lda.compute(ldaData, labels);
        Mat W2 = lda.eigenvectors();
        W2.convertTo(W2, CV_32F);       // match X's float depth for the products below
        Mat projectionMatrix = (W2.t()*W1.t()).t();   // combined PCA+LDA projection

        //testing -- project each test descriptor through this bag's
        // kernel + subspace and stack it below the previous bags'.
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTestingSketches; i++){
            temp = *(testingSketchesDescriptors[i]);
            temp = bag(temp, bag_indexes, 154);
            temp = k.projectProbe(temp);
            temp = projectionMatrix.t()*(temp-meanX);
            if(b==0){
                testingSketchesDescriptorsBag[i] = new Mat();
                *(testingSketchesDescriptorsBag[i]) = temp.clone();
            }
            else{
                vconcat(*(testingSketchesDescriptorsBag[i]), temp, *(testingSketchesDescriptorsBag[i]));
            }
        }
        #pragma omp parallel for private(temp)
        for(uint i=0; i<nTestingPhotos; i++){
            temp = *(testingPhotosDescriptors[i]);
            temp = bag(temp, bag_indexes, 154);
            temp = k.projectGallery(temp);
            temp = projectionMatrix.t()*(temp-meanX);
            if(b==0){
                testingPhotosDescriptorsBag[i] = new Mat();
                *(testingPhotosDescriptorsBag[i]) = temp.clone();
            }
            else{
                vconcat(*(testingPhotosDescriptorsBag[i]), temp, *(testingPhotosDescriptorsBag[i]));
            }
        }
    }

    // Cosine distance between every testing sketch/photo pair.
    Mat distancesCosine = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
    #pragma omp parallel for
    for(uint i=0; i<nTestingSketches; i++){
        for(uint j=0; j<nTestingPhotos; j++){
            distancesCosine.at<double>(i,j) = abs(1-cosineDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j])));
        }
    }

    string file1name = "kernel-prs-" + filter + descriptor + database + to_string(nTraining) + string("cosine") + to_string(count) + string(".xml");
    FileStorage file1(file1name, FileStorage::WRITE);
    file1 << "distanceMatrix" << distancesCosine;
    file1.release();
    return 0;
}
// Trains and evaluates four classifiers (SVM, k-NN, naive Bayes, LDA)
// on synthetic 3-valued samples generated per class, then clusters an
// unlabeled test set with each saved model and plots the result.
// Always returns true (the example framework's success flag); `arg`
// is unused.
bool ex_model(void *arg) {
    Trainer::SetLogLevel (SSI_LOG_LEVEL_DEBUG);

    ssi_size_t n_classes = 4;
    ssi_size_t n_samples = 50;   // per class for the train/devel lists
    ssi_size_t n_streams = 1;
    // Per-class generator parameters, 3 values per class -- presumably
    // (center x, center y, spread); confirm in ModelTools::CreateTestSamples.
    // NOTE(review): rows 3 and 4 are identical, so the last two classes
    // are drawn from the same distribution -- confirm this is intended.
    ssi_real_t train_distr[][3] = { 0.25f, 0.25f, 0.1f, 0.25f, 0.75f, 0.1f, 0.75f, 0.75f, 0.1f, 0.75f, 0.75f, 0.1f };
    ssi_real_t test_distr[][3] = { 0.5f, 0.5f, 0.5f };   // one wide blob spanning the classes

    SampleList strain;
    SampleList sdevel;
    SampleList stest;
    ModelTools::CreateTestSamples (strain, n_classes, n_samples, n_streams, train_distr, "user");
    ModelTools::CreateTestSamples (sdevel, n_classes, n_samples, n_streams, train_distr, "user");
    // stest is generated with a single class; the remaining class names
    // are registered below so cluster() can assign any of them.
    ModelTools::CreateTestSamples (stest, 1, n_samples * n_classes, n_streams, test_distr, "user");
    ssi_char_t string[SSI_MAX_CHAR];
    for (ssi_size_t n_class = 1; n_class < n_classes; n_class++) {
        ssi_sprint (string, "class%02d", n_class);
        stest.addClassName (string);
    }

    // train svm
    {
        SVM *model = ssi_create(SVM, 0, true);
        model->getOptions()->seed = 1234;   // fixed seed for reproducible training
        Trainer trainer(model);
        trainer.train(strain);
        trainer.save("svm");
    }
    // evaluation -- reload from disk, score on devel, cluster the test set
    {
        Trainer trainer;
        Trainer::Load(trainer, "svm");
        Evaluation eval;
        eval.eval(&trainer, sdevel);
        eval.print();
        trainer.cluster(stest);
        ModelTools::PlotSamples(stest, "svm (internal normalization)", ssi_rect(650, 0, 400, 400));
    }

    // train knn
    {
        KNearestNeighbors *model = ssi_create(KNearestNeighbors, 0, true);
        model->getOptions()->k = 5;
        //model->getOptions()->distsum = true;
        Trainer trainer (model);
        trainer.train (strain);
        trainer.save ("knn");
    }
    // evaluation
    {
        Trainer trainer;
        Trainer::Load (trainer, "knn");
        Evaluation eval;
        eval.eval (&trainer, sdevel);
        eval.print ();
        trainer.cluster (stest);
        ModelTools::PlotSamples(stest, "knn", ssi_rect(650, 0, 400, 400));
    }

    // train naive bayes
    {
        NaiveBayes *model = ssi_create(NaiveBayes, 0, true);
        model->getOptions()->log = true;   // presumably log-space probabilities -- confirm
        Trainer trainer (model);
        trainer.train (strain);
        trainer.save ("bayes");
    }
    // evaluation
    {
        Trainer trainer;
        Trainer::Load (trainer, "bayes");
        Evaluation eval;
        eval.eval (&trainer, sdevel);
        eval.print ();
        trainer.cluster (stest);
        ModelTools::PlotSamples(stest, "bayes", ssi_rect(650, 0, 400, 400));
    }

    // train lda (created with option set "lda", unlike the others)
    {
        LDA *model = ssi_create(LDA, "lda", true);
        Trainer trainer (model);
        trainer.train (strain);
        model->print();
        trainer.save ("lda");
    }
    // evaluation
    {
        Trainer trainer;
        Trainer::Load (trainer, "lda");
        Evaluation eval;
        eval.eval (&trainer, sdevel);
        eval.print ();
        trainer.cluster (stest);
        ModelTools::PlotSamples(stest, "lda", ssi_rect(650, 0, 400, 400));
    }

    ssi_print ("\n\n\tpress a key to contiue\n");
    getchar ();
    return true;
}