// Entry point: prepares training data and/or trains the network, selected by argv[1].
//   (no argument) : run both data preparation and training
//   "-data"       : extract faces from photos annotated with face coordinates
//   "-data_box"   : extract faces from photos without coordinate annotations
//   "-train"      : train the network only
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);
    QCoreApplication::setApplicationName("faces");
    QCoreApplication::setOrganizationDomain("wacjan.com");
    QCoreApplication::setOrganizationName("wacjan");
    // NOTE(review): cfgPath is declared outside this view; cfg is never
    // deleted before return -- presumably reclaimed at process exit, confirm.
    Configuration *cfg = CfgReader::readConfiguration(cfgPath);
    QString param;
    if(argc > 1){
        param = argv[1];
    } else {
        param = "";
    }
    // data preparation
    qDebug() << param;
    if(argc == 1 || param == "-data" || param == "-data_box"){
        QList<QImage> facesList;
        QList<QImage> nonFacesList;
        // NOTE(review): with argc == 1 neither branch below runs, so
        // saveFacesToFile is called with empty lists -- confirm intended.
        if(param == "-data_box"){// for faces without face coordinates
            facesList = FacesExtractor::getFaces(cfg->getFaceSize(), cfg->getFacesDirectory());
            nonFacesList = FacesExtractor::getFaces(cfg->getFaceSize(), cfg->getNonFacesDirectory());
        }
        if(param == "-data"){// for photos with face coordinates
            facesList = FacesExtractor::getFacesWithoutCoords(cfg->getFaceSize(), cfg->getFacesDirectory());
            nonFacesList = FacesExtractor::getFacesWithoutCoords(cfg->getFaceSize(), cfg->getNonFacesDirectory());
        }
        // Persist both sample sets; each sample is width*height values long.
        FacesExtractor::saveFacesToFile(cfg->getDataSavePath(), facesList, nonFacesList, cfg->getFaceSize().width()*cfg->getFaceSize().height());
        // (Polish log text: "Found number of samples in the training set")
        qDebug() << "Znaleziona ilosc probek w zestawie treningowym: " << "faces: " << facesList.length() << " nonfaces: " << nonFacesList.length();
        facesList.clear();
        nonFacesList.clear();
    }
    // network training
    if(argc == 1 || param == "-train"){
        train(cfg);
    }
    return 0;
}
// MNIST trainer entry point.  Optional CLI overrides:
//   one argument          -> epsilon
//   two or more arguments -> hidden layer sizes (argv[1], argv[2]),
//                            then optionally epsilon (argv[3]) and sigma (argv[4])
int main(int argc, char **argv)
{
    float sigma = 0.2;
    float epsilon = 0.8;
    int n_hid1 = 100;
    int n_hid2 = 20;

    if (argc >= 3) {
        // Both hidden-layer sizes were given explicitly.
        n_hid1 = std::atoi(argv[1]);
        n_hid2 = std::atoi(argv[2]);
        if (argc >= 4) {
            epsilon = std::atof(argv[3]);
        }
        if (argc >= 5) {
            sigma = std::atof(argv[4]);
        }
    } else if (argc == 2) {
        // A single argument only overrides the learning rate.
        epsilon = std::atof(argv[1]);
    }

    train("train-images-idx3-ubyte", "train-labels-idx1-ubyte", n_hid1, n_hid2, sigma, epsilon);
    return 0;
}
// Trainer entry point (liblinear-style): parse options, load the problem,
// validate parameters, then either cross-validate or train (with CPU timing)
// and save the model; finally release all globals.
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;
    parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
    // Remember the training-file path inside the global parameter struct.
    // NOTE(review): presumably destroy_param() releases this 1024-byte
    // buffer -- confirm, otherwise it leaks.
    param.train_file = Malloc(char,1024);
    strcpy(param.train_file, input_file_name);
    error_msg = check_parameter(&prob,&param);
    if(error_msg)
    {
        fprintf(stderr,"ERROR: %s\n",error_msg);
        exit(1);
    }
    if(flag_cross_validation)
    {
        do_cross_validation();
    }
    else
    {
        // CPU timing around the actual training call.
        clock_t start_cpu, end_cpu;
        double cpu_time_used;
        start_cpu = clock();
        model_=train(&prob, &param);
        end_cpu = clock();
        // NOTE(review): computed but never printed or returned.
        cpu_time_used = ((double) (end_cpu - start_cpu)) / CLOCKS_PER_SEC;
        if(save_model(model_file_name, model_))
        {
            fprintf(stderr,"can't save model to file %s\n",model_file_name);
            exit(1);
        }
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);
    return 0;
}
// Constructs an SGD learner over `docs`: sizes the model by the dataset's
// feature count, optionally calibrates it against the labelled instances,
// then runs training.  `loss` is moved into loss_ in the init list, so the
// body deliberately dereferences the member (*loss_) rather than the
// moved-from parameter.
sgd::sgd(dataset_view_type docs, std::unique_ptr<learn::loss::loss_function> loss, learn::sgd_model::options_type options, double gamma, size_t max_iter, bool calibrate)
    : model_{docs.total_features(), options},
      gamma_{gamma},
      max_iter_{max_iter},
      loss_{std::move(loss)}
{
    if (calibrate)
    {
        // Label lookup is delegated back to the dataset view.
        model_.calibrate(docs, *loss_, [&](const learn::instance& inst) { return docs.label(inst); });
    }
    // docs is only moved after the last use above.
    train(std::move(docs));
}
// OCR constructor: loads the pre-trained 15x15 feature data and class labels
// from OCR.xml and trains the classifier with 10 hidden-layer units.
// NOTE(review): the trainFile parameter is ignored; the path "OCR.xml" is
// hard-coded -- confirm whether trainFile should be used instead.
OCR::OCR(string trainFile)
{
    DEBUG = false;
    trained = false;
    saveSegments = false;
    charSize = 20;
    // Read the OCR.xml file
    FileStorage fs;
    fs.open("OCR.xml", FileStorage::READ);
    Mat TrainingData;
    Mat Classes;
    fs["TrainingDataF15"] >> TrainingData;
    fs["classes"] >> Classes;
    train(TrainingData, Classes, 10);
}
// Reads a book for this skill: trains between minimumGain and maximumGain
// times, stopping early once the skill reaches maximumLevel, then practices
// for the given turn.  Returns the resulting exercise value.
int SkillLevel::readBook(int minimumGain, int maximumGain, const calendar& turn, int maximumLevel)
{
    int gain = rng(minimumGain, maximumGain);
    // Fix: `level` was uninitialized, so the early-exit comparison read an
    // indeterminate value if train() ever left it untouched.
    // NOTE(review): assumes train(int&) reports the current level through
    // its out-parameter -- confirm against its declaration.
    int level = 0;
    for (int i = 0; i < gain; ++i) {
        train(level);
        if (level >= maximumLevel)
            break;
    }
    practice(turn);
    return _exercise;
}
// CvMat* constructor: default-initializes every cached pointer/state field
// of the booster, then immediately delegates all inputs to train().
CvGBTrees::CvGBTrees( const CvMat* _train_data, int _tflag, const CvMat* _responses, const CvMat* _var_idx, const CvMat* _sample_idx, const CvMat* _var_type, const CvMat* _missing_mask, CvGBTreesParams _params )
{
    // Internal buffers start out empty; train() allocates what it needs.
    data = 0;
    weak = 0;
    orig_response = sum_response = sum_response_tmp = 0;
    subsample_train = subsample_test = 0;
    missing = sample_idx = 0;
    class_labels = 0;

    // Scalar defaults.
    default_model_name = "my_boost_tree";
    class_count = 1;
    delta = 0.0f;

    train( _train_data, _tflag, _responses, _var_idx, _sample_idx, _var_type, _missing_mask, _params );
}
struct model* main(int argc, char **argv) { char input_file_name[1024]; char model_file_name[1024]; const char *error_msg; parse_command_line(argc, argv, input_file_name, model_file_name); auto prob = read_problem(input_file_name); error_msg = check_parameter(&prob, ¶m); if (error_msg) { fprintf(stderr, "ERROR: %s\n", error_msg); exit(1); } struct model *pmodel; if (flag_find_C) { do_find_parameter_C(&prob); } else if (flag_cross_validation) { do_cross_validation(&prob); } else { pmodel = train(&prob, ¶m); /*if (save_model(model_file_name, pmodel)) { fprintf(stderr, "can't save model to file %s\n", model_file_name); exit(1); } free_and_destroy_model(&pmodel);*/ } destroy_param(¶m); free(prob.y); free(prob.x); free(x_space); free(line); return pmodel; }
//Init OCR with training data OCR::OCR(string trainFile){ showSteps=false; debug=false; trained=false; saveSegments=false; charSize=20; //Read file storage and 15*15 training data FileStorage fs; fs.open("OCR.xml", FileStorage::READ); Mat TrainingData; Mat Classes; fs["TrainingDataF15"] >> TrainingData; fs["classes"] >> Classes; //Train the data, the hidden layers is 10 train(TrainingData, Classes, 10); }
int main(int argc, char** argv) { //destroy_params(); argc--; argv++; flags = 0; if (argc > 0) { readParams(argc, argv); } init_params(cam); train(); if ((flags & P_TREE) != 0) { printTree(ptree->get_root()); printf("\n\n"); } if ((flags & F_CHK) != 0) runTest(); predict(); destroy_params(); return (EXIT_SUCCESS); }
// Fits a k-nearest-neighbours classifier on the given samples.  Truth values
// are remapped to {0, 1} floats (noaction -> 0, action -> 1) before training;
// anything else is reported but left as-is.
std::unique_ptr<Classifier> KNearestNeighborsConstructor::train(const cv::Mat & trainingsset, const cv::Mat& truthset) const{
    assert(trainingsset.rows == truthset.rows);
    assert(truthset.cols == 1);

    auto knn = std::unique_ptr<CvKNearest>(new CvKNearest());

    // The underlying OpenCV API expects float labels.
    cv::Mat labels;
    truthset.convertTo(labels, CV_32F);
    for (int row = 0; row < truthset.rows; ++row) {
        float& value = labels.at<float>(row, 0);
        if (value == noaction) {
            value = 0;
        } else if (value == action) {
            value = 1;
        } else {
            std::cerr << "[WARNING] Invalid truth value given to classifier"<<std::endl;
        }
    }

    knn->train(trainingsset, labels, cv::Mat(), false, _k);
    return std::unique_ptr<Classifier>(new KNearestNeighborClassifier(std::move(knn), _k));
}
// Copies the unlabelled samples into a plain MatrixFloat and delegates to
// the matrix-based train() overload.  Fails fast on an empty dataset.
bool HierarchicalClustering::train_(UnlabelledData &trainingData){

    // Nothing to cluster.
    if( trainingData.getNumSamples() == 0 ) return false;

    M = trainingData.getNumSamples();
    N = trainingData.getNumDimensions();

    // Element-wise copy into a dense M x N matrix.
    MatrixFloat data(M,N);
    for(UINT row=0; row<M; row++){
        for(UINT col=0; col<N; col++){
            data[row][col] = trainingData[row][col];
        }
    }

    return train( data );
}
// Trainer entry point (liblinear variant with binary-class CV support):
// parse options, load the problem, then either search for the best C, run
// binary-class cross-validation, or train and save a model.
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;
    parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
    error_msg = check_parameter(&prob,&param);
    if(error_msg)
    {
        fprintf(stderr,"ERROR: %s\n",error_msg);
        exit(1);
    }
    if (flag_find_C)
    {
        do_find_parameter_C();
    }
    else if(flag_cross_validation)
    {
        // Cross-validation score reported as a percentage.
        double cv = binary_class_cross_validation(&prob, &param, nr_fold);
        printf("Cross Validation = %g%%\n",100.0*cv);
    }
    else
    {
        model_=train(&prob, &param);
        if(save_model(model_file_name, model_))
        {
            fprintf(stderr,"can't save model to file %s\n",model_file_name);
            exit(1);
        }
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);
    return 0;
}
// cv::Mat overload of the gradient-boosted-trees constructor: zeroes all
// internal state, clear()s any previous model, then delegates to train().
// NOTE(review): unlike the CvMat* constructor, this one calls clear()
// before training -- confirm whether that asymmetry is intentional.
CvGBTrees::CvGBTrees( const cv::Mat& trainData, int tflag, const cv::Mat& responses, const cv::Mat& varIdx, const cv::Mat& sampleIdx, const cv::Mat& varType, const cv::Mat& missingDataMask, CvGBTreesParams _params )
{
    data = 0;
    weak = 0;
    default_model_name = "my_boost_tree";
    orig_response = sum_response = sum_response_tmp = 0;
    subsample_train = subsample_test = 0;
    missing = sample_idx = 0;
    class_labels = 0;
    class_count = 1;
    delta = 0.0f;
    clear();
    // NOTE(review): the trailing `false` flag's meaning is not visible from
    // here -- presumably an "update existing model" switch; confirm against
    // train()'s declaration.
    train(trainData, tflag, responses, varIdx, sampleIdx, varType, missingDataMask, _params, false);
}
/// <summary> /// Creates new instance of the OCR class. Trains the k-nearest algorithm with given data. /// </summary> /// <params name="path"> /// Relative or absolute path of the directory under which training samples are located. /// </params> /// <params name="classes"> /// Number of possible classes into which data can be classified into. /// </params> /// <params name="samples"> /// Total number of samples for each class type. /// </params> OCR::OCR(char* path, int classe, int samples) { sprintf(file_path, "%s", path); //file_path = path; train_samples = samples; classes = classe; size = 80; trainData = cvCreateMat(train_samples*classes, size*size, CV_32FC1); trainClasses = cvCreateMat(train_samples*classes, 1, CV_32FC1); //Get data (get images and process it) getData(); //train train(); printf(" ---------------------------------------------------------------\n"); printf("|\tClass\t|\tPrecision\t|\tAccuracy\t|\n"); printf(" ---------------------------------------------------------------\n"); }
// Materializes an entire streaming hypothesis pack into memory so it can be
// visited in random order: walks the stream once, copying every
// feature/score pair and recording one index per sentence.
RandomAccessHypPackEnumerator::RandomAccessHypPackEnumerator(vector<string> const& featureFiles, vector<string> const& scoreFiles, bool no_shuffle)
{
    StreamingHypPackEnumerator stream(featureFiles, scoreFiles);

    size_t nextIndex = 0;
    for (stream.reset(); !stream.finished(); stream.next()) {
        // Copy every hypothesis of the current sentence.
        vector<MiraFeatureVector> feats;
        vector<ScoreDataItem> scores;
        for (size_t hyp = 0; hyp < stream.cur_size(); ++hyp) {
            feats.push_back(stream.featuresAt(hyp));
            scores.push_back(stream.scoresAt(hyp));
        }
        m_features.push_back(feats);
        m_scores.push_back(scores);
        m_indexes.push_back(nextIndex++);
    }

    m_cur_index = 0;
    m_no_shuffle = no_shuffle;
    m_num_dense = stream.num_dense();
}
// Loads ANN training data from the XML file at `s` and (re)trains the net
// with 10 hidden units.  Clears any previously loaded network first.
void CvScan::LoadANNModel(string s){
    ann.clear();
    //ann.load(s.c_str(), "ann");
    //return;
    FileStorage fs;
    fs.open(s, FileStorage::READ);
    if (fs.isOpened()){
        cout<<"File is opened:"<<s<<endl;
    }
    // NOTE(review): if the file failed to open there is no early return --
    // the reads below yield empty matrices and train() is still called.
    // Confirm whether that is intended.
    Mat TrainingData;
    Mat Classes;
    fs["TrainingDataF15"]>>TrainingData;
    fs["classes"]>>Classes;
    fs.release();
    //cout << trainingDataF5 << endl;
    train(TrainingData, Classes, 10);
    //*/
}
int main(int argc, char **argv) { StaticData::Init(argc, argv); VowpalTaggit vt; if(StaticData::Has("train")) { mkTrainer(vt); size_t passes = StaticData::Get<size_t>("passes"); std::string trainFile = StaticData::Get<std::string>("train"); for(size_t i = 0; i < passes; ++i) { std::cerr << "Starting pass " << (i+1) << "/" << passes << std::endl; std::ifstream train(trainFile.c_str()); vt(train); if(StaticData::Has("final-model") && StaticData::Get<bool>("save-per-pass") && passes > 1) { std::string modelFile = StaticData::Get<std::string>("final-model"); std::string perPassModelFile = modelFile + "." + std::to_string(i+1); vt.save(perPassModelFile); } } if(StaticData::Has("final-model")) { std::string modelFile = StaticData::Get<std::string>("final-model"); vt.save(modelFile); } } if(StaticData::Has("test")) { std::string testFile = StaticData::Get<std::string>("test"); mkPredictor(vt); std::ifstream test(testFile.c_str()); vt(test); } return 0; }
basicOCR::basicOCR()// constructor
{
    //initial
    sprintf(file_path , "OCR/");
    train_samples = 50; // training samples per class
    classes = 95;// one class per printable ASCII character
    size = 128;// presumably the sample image side (each row is size*size wide)
    trainData = cvCreateMat(train_samples*classes, size*size, CV_32FC1);// matrix of training data
    trainClasses = cvCreateMat(train_samples*classes, 1, CV_32FC1);
    //Get data (get images and process it)
    getData();
    //train
    train();
    //Test
    //test();
}
/*--------------------------------------------------------------------------------------------------------------------------*/
/*
 * Game loop: show the menu, read the player's choice, and dispatch to the
 * matching action until the player quits (4) or runs out of HP.
 */
int main(void)
{
    int hp_remaining = 10, max = 10, experience = 0, userInput;
    float days_remaining = 8;

    do {
        menu(&days_remaining, &hp_remaining, &experience);
        printf("\nSelect: ");

        /* Fix: an unchecked scanf left userInput unread on non-numeric
         * input and re-read the same bad token forever.  Discard the rest
         * of the line and fall through to the "try again" branch. */
        if (scanf(" %d", &userInput) != 1) {
            int ch;
            while ((ch = getchar()) != '\n' && ch != EOF) { }
            if (ch == EOF)
                break; /* input exhausted: leave the loop cleanly */
            userInput = 0; /* any non-menu value triggers the retry message */
        }

        if (userInput == 1) {
            rest_at_inn(&days_remaining, &hp_remaining, max);
        } else if (userInput == 2) {
            train(&days_remaining, &hp_remaining, &experience);
        } else if (userInput == 3) {
            battle_demon_lord(&hp_remaining);
        } else if (userInput == 4) {
            printf("Okay buh bye then...\n\n");
        } else {
            printf("Errr try again...\n\n");
        }
    } while ((userInput != 4) && (hp_remaining > 0));

    printf("\nGame Over!\n\n");
    return 0;
}
void main() { Configuration configurations[] = { //{ "../nn_pattern", LeafRecognizerType::TwoStepsRecognizer, NNToolsType::Pattern, SkeletonType::None }, { "../nn_skeleton_raster", LeafRecognizerType::NeuralNetworkRecognizer, NNToolsType::Skeleton, SkeletonType::Raster }, //{ "../nn_skeleton_vector", LeafRecognizerType::NeuralNetworkRecognizer, NNToolsType::Skeleton, SkeletonType::Vector }, //{ "../nn_skeleton_raster_vector", LeafRecognizerType::NeuralNetworkRecognizer, NNToolsType::Skeleton, SkeletonType::Raster_Vector }, }; for (auto config : configurations) { gConfiguration = config; NeuralNetworkTools* neuralNetTools = OCRFactories::CreateTools(); if (neuralNetTools == nullptr) std::cout << "error nullptr" << std::endl; train(neuralNetTools); delete neuralNetTools; } system("pause"); }
// // Training the Neural Network // void NN_File::train(const std::string & trainingSet) { TiXmlDocument doc(trainingSet); doc.LoadFile(); TiXmlElement * pRoot = doc.RootElement(); loadNetwork(pRoot->FirstChildElement("neuralnetwork")); randomWeights(); double minError; pRoot->Attribute("error", &minError); minError /= 100.0; // Training float error = 100.0; while( error > minError ) { error = train(pRoot); std::cout << "Error : " << error *100 << std::endl; } }
// Runs one ML regression test case: prepare the data, shuffle the
// train/test split, train the model, then record the test error before and
// after a save/load round-trip (compared later by the harness).
int CV_SLMLTest::run_test_case( int testCaseIdx )
{
    int code = CvTS::OK;
    code = prepare_test_case( testCaseIdx );

    if( code == CvTS::OK )
    {
        data.mix_train_and_test_idx();
        code = train( testCaseIdx );
        if( code == CvTS::OK )
        {
            // Error of the freshly trained model...
            get_error( testCaseIdx, CV_TEST_ERROR, &test_resps1 );
            // NOTE(review): tmpnam() is race-prone/deprecated; tolerable in
            // a test harness, but mkstemp-style APIs would be safer.
            save( tmpnam( fname1 ) );
            load( fname1);
            // ...and after reloading it from disk.
            get_error( testCaseIdx, CV_TEST_ERROR, &test_resps2 );
            save( tmpnam( fname2 ) );
        }
        else
            ts->printf( CvTS::LOG, "model can not be trained" );
    }
    return code;
}
/*
 * Bloom-filter smoke test: train on the first half of Hamlet's lines, then
 * verify there are no false negatives on that half and count collisions
 * (false positives) on the second half.
 */
int main()
{
    /* Fix: the malloc result was unchecked and never freed; calloc also
     * replaces the separate memset. */
    int *filter = calloc(FILTER_SIZE, sizeof(int));
    if (filter == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }

    int num_collisions = 0;
    unsigned long hash_index;

    /* Insert the first half of the corpus. */
    for (int i = 0; i < HAMLET_NUM_LINES/2; i++) {
        hash_index = hash(HAMLET_LINES[i]);
        train(filter, FILTER_SIZE, hash_index);
    }

    /* Every trained line must be reported as present. */
    for (int i = 0; i < HAMLET_NUM_LINES/2; i++) {
        if (!contains(filter, FILTER_SIZE, hash(HAMLET_LINES[i]))) {
            printf("ERROR: false negative with the line: %s\n", HAMLET_LINES[i]);
        }
    }

    /* Untrained lines that appear present are collisions. */
    for (int i = HAMLET_NUM_LINES/2; i < HAMLET_NUM_LINES; i++) {
        if (contains(filter, FILTER_SIZE, hash(HAMLET_LINES[i]))) {
            num_collisions++;
        }
    }

    printf("There were %i collisions\n", num_collisions);
    free(filter);
    return 0;
}
// Dispatches the first command-line token to the matching sub-command,
// shifting argv so each handler sees only its own arguments.
int main(int argc, char **argv)
{
    if (argc < 2) {
        main_help();
        return EXIT_FAILURE;
    }

    const char *cmd = argv[1];
    int subArgc = argc - 2;
    char **subArgv = argv + 2;

    if (strcmp(cmd, "convert") == 0)
        return convert(subArgc, subArgv);
    if (strcmp(cmd, "train") == 0)
        return train(subArgc, subArgv);
    if (strcmp(cmd, "predict") == 0)
        return predict(subArgc, subArgv);
    if (strcmp(cmd, "view") == 0)
        return view(subArgc, subArgv);
    if (strcmp(cmd, "similarity") == 0)
        return similarity(subArgc, subArgv);

    fprintf(stderr, "Error: Invalid command %s\n", argv[1]);
    return EXIT_FAILURE;
}
// Trainer entry point (classic liblinear flow): parse options, load the
// problem, validate parameters, then either cross-validate or train and
// save the model; finally release all globals.
int main(int argc, char **argv)
{
    char input_file_name[1024];
    char model_file_name[1024];
    const char *error_msg;
    parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
    error_msg = check_parameter(&prob,&param);
    if(error_msg)
    {
        fprintf(stderr,"Error: %s\n",error_msg);
        exit(1);
    }
    if(flag_cross_validation)
    {
        do_cross_validation();
    }
    else
    {
        model_=train(&prob, &param);
        if(save_model(model_file_name, model_))
        {
            fprintf(stderr,"can't save model to file %s\n",model_file_name);
            exit(1);
        }
        free_and_destroy_model(&model_);
    }
    destroy_param(&param);
    free(prob.y);
    free(prob.x);
    free(x_space);
    free(line);
    return 0;
}
int main( int argc, char *argv[] ) { int numInputs; Layer *hiddenLayer, *outputLayer; TestCase testCase; int i; initRand(); if( !processArguments( argc, argv ) ) { fprintf( stderr, "Usage: main [-r, -t] [node definition file] [input file, training file]\n" ); exit( EXIT_FAILURE ); } numInputs = buildLayers( &hiddenLayer, &outputLayer ); if( !numInputs ) { exit( EXIT_FAILURE ); } getDefaultTestCase( numInputs, outputLayer->numNodes, &testCase ); if( trainingFlag ) { for( i = 0; i < 1000; ++i ) { populateNextTestCase( &testCase ); train( &testCase, hiddenLayer, outputLayer ); } if( !persistWeights( numInputs, hiddenLayer, outputLayer ) ) { exit( EXIT_FAILURE ); } } else { while( populateNextTestCase( &testCase ) == NEW_INPUT ) { forwardPropagate( testCase.inputs, hiddenLayer, outputLayer ); printTestResults( testCase.inputs, outputLayer, testCase.desiredOutputs ); } } exit( EXIT_SUCCESS ); }
int main(const int argc, const char * argv[]) { int ret = 0; unsigned u, v, tv, t, res, yes, yes_captcha, test_num = 1000; CCaptcha * captcha = NULL; CClassifier * cl = NULL; try { throw_null((captcha = new CCaptchaBrigade1)); throw_null((cl = train(CLT_BAYES, captcha, 10000, 1000))); for(u = 0, yes = 0, yes_captcha = 0; u < test_num; u++) { yes += (res = test(cl, captcha)); if(res == nic) yes_captcha++; } printf("Right numbers: %u from %u (%lf %%)\nRight captcha: %u from %u (%lf %%)\n\n", yes, test_num * nic, yes * 100 / (double) (test_num * nic), yes_captcha, test_num, yes_captcha * 100 / (double) test_num); } catch(...) { ret = -1; fprintf(stderr, "\n-------> Error <-------\n\n"); } if(captcha != NULL) delete captcha; if(cl != NULL) delete cl; return ret; }
//学習します。 bool XLMachineLearningLibliear::Train() { int maxindex = 0; for(std::vector<feature*>::const_iterator it = ProblemX.begin() ; it != ProblemX.end() ; ++it ) { for(feature* nodes = *it; nodes->index != -1 ; ++nodes ) { maxindex = max(nodes->index,maxindex); } } struct problem prob; prob.bias = -1; //バイアス?初期化するときの引数で与えるらしいが・・・ prob.l = ProblemY.size(); //行数 prob.n = maxindex; //一番でかい素性番号 prob.y = &ProblemY[0]; prob.x = (::feature_node**) ((feature*) &ProblemX[0]); struct parameter param; param.solver_type = L2R_L2LOSS_SVC_DUAL; param.C = 1; param.nr_weight = 0; param.weight_label = NULL; param.weight = NULL; { if(param.solver_type == L2R_LR || param.solver_type == L2R_L2LOSS_SVC) param.eps = 0.01; else if(param.solver_type == L2R_L2LOSS_SVC_DUAL || param.solver_type == L2R_L1LOSS_SVC_DUAL || param.solver_type == MCSVM_CS || param.solver_type == L2R_LR_DUAL) param.eps = 0.1; else if(param.solver_type == L1R_L2LOSS_SVC || param.solver_type == L1R_LR) param.eps = 0.01; } FreeModel(); this->Model = train(&prob,¶m); return this->Model != NULL; }
/**
 * Trains a classification model from an XML training file and stores the
 * result in the classifier's model file.
 *
 * @param input_file     path to an XML file with training instances
 * @param is_unit_length if true, feature vectors are unit-norm normalized
 * @return true on success, false if the file is missing or unreadable
 */
bool Classifier::train(string input_file, bool is_unit_length)
{
    if (_verbose) printf("Training...\n");

    if (!file_exists(input_file, false)) {
        printf("Error: Training file '%s' not found!\n", input_file.c_str());
        return false;
    }

    // Parse the training instances from XML.
    TextToClassify* corpus = load_texts(input_file);
    if (corpus == NULL) {
        printf("Error: cannot load text from XML file '%s'\n", input_file.c_str());
        return false;
    }

    // Fit the model, then release the parsed data regardless of outcome.
    bool ok = train(corpus, is_unit_length);
    delete_text(corpus);
    return ok;
}