AdaBoost::AdaBoost(const unsigned int no_of_classes, const unsigned int no_of_features)
{
    this->is_modelfile_loaded_ = false;
    this->number_of_features_ = no_of_features;
    this->number_of_classes_ = no_of_classes;
    // Real AdaBoost with 100 weak classifiers, weight trim rate 0.95,
    // maximum tree depth 5, no surrogate splits, no priors
    this->boost_parameters_ = CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0);
}
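The constructor only stores parameters and clears is_modelfile_loaded_; a minimal sketch of the matching load path follows, assuming the wrapper keeps a CvBoost member and exposes a LoadModel method (both names are hypothetical, not from the original):

// Hypothetical companion to the constructor above. LoadModel and
// boost_classifier_ are assumed names; only is_modelfile_loaded_
// appears in the original. CvBoost::load() and get_weak_predictors()
// are standard OpenCV 2.x ML API calls.
bool AdaBoost::LoadModel(const char* model_path)
{
    this->boost_classifier_.load(model_path);
    // loading succeeded if the model has a non-empty weak-predictor list
    this->is_modelfile_loaded_ = (this->boost_classifier_.get_weak_predictors() != 0);
    return this->is_modelfile_loaded_;
}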
int main()
{
    const int train_sample_count = 300;

//#define LEPIOTA
#ifdef LEPIOTA
    const char* filename = "../../../OpenCV_SVN/samples/c/agaricus-lepiota.data";
#else
    const char* filename = "../../../OpenCV_SVN/samples/c/waveform.data";
#endif

    CvDTree dtree;
    CvBoost boost;
    CvRTrees rtrees;
    CvERTrees ertrees;

    CvMLData data;
    CvTrainTestSplit spl( train_sample_count );

    data.read_csv( filename );

#ifdef LEPIOTA
    data.set_response_idx( 0 );
#else
    data.set_response_idx( 21 );
    data.change_var_type( 21, CV_VAR_CATEGORICAL );
#endif

    data.set_train_test_split( &spl );

    printf("======DTREE=====\n");
    dtree.train( &data, CvDTreeParams( 10, 2, 0, false, 16, 0, false, false, 0 ));
    print_result( dtree.calc_error( &data, CV_TRAIN_ERROR ),
                  dtree.calc_error( &data, CV_TEST_ERROR ),
                  dtree.get_var_importance() );

#ifdef LEPIOTA
    printf("======BOOST=====\n");
    boost.train( &data, CvBoostParams(CvBoost::DISCRETE, 100, 0.95, 2, false, 0));
    print_result( boost.calc_error( &data, CV_TRAIN_ERROR ),
                  boost.calc_error( &data, CV_TEST_ERROR ), 0 );
#endif

    printf("======RTREES=====\n");
    rtrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
    print_result( rtrees.calc_error( &data, CV_TRAIN_ERROR ),
                  rtrees.calc_error( &data, CV_TEST_ERROR ),
                  rtrees.get_var_importance() );

    printf("======ERTREES=====\n");
    ertrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ));
    print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR ),
                  ertrees.calc_error( &data, CV_TEST_ERROR ),
                  ertrees.get_var_importance() );

    return 0;
}
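These samples call a print_result helper that is not shown. A minimal sketch consistent with the call sites (train error, test error, optional variable-importance vector; not the original implementation):

#include <stdio.h>
#include <opencv2/core/core_c.h>

// Sketch of the print_result helper the samples call. calc_error
// returns a misclassification percentage; get_var_importance returns
// a row vector (or 0 for models without importance estimates).
static void print_result( float train_err, float test_err, const CvMat* var_imp )
{
    printf( "train error: %f\n", train_err );
    printf( "test error:  %f\n", test_err );
    if( var_imp )
    {
        // importance may be stored as float or double depending on the model
        bool is_flt = CV_MAT_TYPE( var_imp->type ) == CV_32FC1;
        printf( "variable importance:\n" );
        for( int i = 0; i < var_imp->cols; i++ )
            printf( "%d: %f\n", i, is_flt ? var_imp->data.fl[i] : var_imp->data.db[i] );
    }
    printf( "\n" );
}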
void Test::trainedClassifier()
{
    /* STEP 2. Opening the file */
    // 1. Declare a structure to keep the data
    CvMLData cvml;
    // 2. Read the file
    cvml.read_csv("samples.csv");
    // 3. Indicate which column is the response
    cvml.set_response_idx(0);

    /* STEP 3. Splitting the samples */
    // 1. Select 40 samples for the training set
    CvTrainTestSplit cvtts(40, true);
    // 2. Assign the split to the data
    cvml.set_train_test_split(&cvtts);

    printf("Training ... ");

    /* STEP 4. The training */
    // 1. Declare the classifier
    CvBoost boost;
    // 2. Train it with 100 weak classifiers (Real AdaBoost, depth-1 stumps)
    boost.train(&cvml, CvBoostParams(CvBoost::REAL, 100, 0, 1, false, 0), false);

    /* STEP 5. Calculating the training and testing error */
    // 1. Declare a couple of vectors to save the predictions of each sample
    std::vector<float> train_responses, test_responses;
    // 2. Calculate the training error
    float fl1 = boost.calc_error(&cvml, CV_TRAIN_ERROR, &train_responses);
    // 3. Calculate the test error
    float fl2 = boost.calc_error(&cvml, CV_TEST_ERROR, &test_responses);
    printf("Error train %f \n", fl1);
    printf("Error test %f \n", fl2);

    /* STEP 6. Save your classifier */
    boost.save("./trained_boost.xml", "boost");
    //return EXIT_SUCCESS;
}
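Once trained_boost.xml is saved, the model can be reloaded and applied to new samples. A sketch, assuming a 1 x N float row laid out like the training CSV minus the response column (classifyOne is a hypothetical helper):

#include <opencv2/ml/ml.hpp>

// Sketch: reload the classifier saved above and classify one sample.
// The section name "boost" must match the one passed to save().
float classifyOne(const cv::Mat& sample) // 1 x N, CV_32F
{
    CvBoost boost;
    boost.load("./trained_boost.xml", "boost");
    return boost.predict(sample); // returns the predicted class label
}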
/**
 * @author     JIA Pei
 * @version    2009-10-04
 * @brief      Training
 * @param      data        Input - training data, one sample per row
 * @param      categories  Input - column vector of class labels
 * @return     void
 */
void CClassificationAlgs::Training(const Mat_<float>& data, const Mat_<int>& categories)
{
    unsigned int NbOfSamples = data.rows;

    // Count the distinct class labels
    set<int> ClassSet;
    for(int i = 0; i < categories.rows; i++)
    {
        ClassSet.insert(categories(i, 0));
    }
    this->m_iNbOfCategories = ClassSet.size();

    switch(this->m_iClassificationMethod)
    {
    case CClassificationAlgs::DecisionTree:
        this->m_CVDtree.train( data, CV_ROW_SAMPLE, categories,
            Mat(), Mat(), Mat(), Mat(),
            CvDTreeParams( INT_MAX, 2, 0, false, this->m_iNbOfCategories,
                           0, false, false, 0 ) );
        break;
    case CClassificationAlgs::Boost:
        this->m_CVBoost.train( data, CV_ROW_SAMPLE, categories,
            Mat(), Mat(), Mat(), Mat(),
            CvBoostParams(CvBoost::DISCRETE, 50, 0.95, INT_MAX, false, 0),
            false );
        break;
    case CClassificationAlgs::RandomForest:
        this->m_CVRTrees.train( data, CV_ROW_SAMPLE, categories,
            Mat(), Mat(), Mat(), Mat(),
            CvRTParams( INT_MAX, 2, 0, false, this->m_iNbOfCategories,
                        0, true, 0, 100, 0, CV_TERMCRIT_ITER ) );
        break;
    case CClassificationAlgs::ExtremeRandomForest:
        this->m_CVERTrees.train( data, CV_ROW_SAMPLE, categories,
            Mat(), Mat(), Mat(), Mat(),
            CvRTParams( INT_MAX, 2, 0, false, this->m_iNbOfCategories,
                        0, true, 0, 100, 0, CV_TERMCRIT_ITER ) );
        break;
    case CClassificationAlgs::SVM:
        this->m_CVSVM.train( data, categories, Mat(), Mat(),
            CvSVMParams(CvSVM::C_SVC, CvSVM::RBF,
                        0, 1, 0, 1, 0, 0, NULL,
                        cvTermCriteria(CV_TERMCRIT_ITER, 1000, 1E-6) ) );
        break;
    }
}
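A prediction counterpart would dispatch on the same m_iClassificationMethod switch; a sketch under the assumption that the class exposes such a method (the name Classification is assumed, not from the original):

// Hypothetical prediction counterpart to Training(). Note that
// CvDTree::predict returns a CvDTreeNode*, while the other models
// return the predicted label directly as a float.
int CClassificationAlgs::Classification(const Mat_<float>& sample)
{
    float res = -1.0f;
    switch(this->m_iClassificationMethod)
    {
    case CClassificationAlgs::DecisionTree:
        res = (float) this->m_CVDtree.predict(sample)->value;
        break;
    case CClassificationAlgs::Boost:
        res = this->m_CVBoost.predict(sample);
        break;
    case CClassificationAlgs::RandomForest:
        res = this->m_CVRTrees.predict(sample);
        break;
    case CClassificationAlgs::ExtremeRandomForest:
        res = this->m_CVERTrees.predict(sample);
        break;
    case CClassificationAlgs::SVM:
        res = this->m_CVSVM.predict(sample);
        break;
    }
    return (int) res;
}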
static int
build_boost_classifier( char* data_filename,
                        char* filename_to_save, char* filename_to_load )
{
    const int class_count = 26;
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* temp_sample = 0;
    CvMat* weak_responses = 0;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    int nsamples_all = 0, ntrain_samples = 0;
    int var_count;
    int i, j, k;
    double train_hr = 0, test_hr = 0;
    CvBoost boost;

    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.5);
    var_count = data->cols;

    // Create or load Boosted Tree classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        boost.load( filename_to_load );
        ntrain_samples = 0;
        if( !boost.get_weak_predictors() )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", filename_to_load );
    }
    else
    {
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        //
        // As the boosted tree classifier in MLL can currently only be
        // trained on 2-class problems, we transform the training database
        // by "unrolling" each training sample as many times as the number
        // of classes (26) that we have.
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        CvMat* new_data = cvCreateMat( ntrain_samples*class_count, var_count + 1, CV_32F );
        CvMat* new_responses = cvCreateMat( ntrain_samples*class_count, 1, CV_32S );

        // 1. unroll the database type mask
        printf( "Unrolling the database...\n");
        for( i = 0; i < ntrain_samples; i++ )
        {
            float* data_row = (float*)(data->data.ptr + data->step*i);
            for( j = 0; j < class_count; j++ )
            {
                float* new_data_row = (float*)(new_data->data.ptr +
                                               new_data->step*(i*class_count+j));
                for( k = 0; k < var_count; k++ )
                    new_data_row[k] = data_row[k];
                new_data_row[var_count] = (float)j;
                new_responses->data.i[i*class_count + j] = responses->data.fl[i] == j+'A';
            }
        }

        // 2. create type mask
        var_type = cvCreateMat( var_count + 2, 1, CV_8U );
        cvSet( var_type, cvScalarAll(CV_VAR_ORDERED) );
        // the last indicator variable, as well
        // as the new (binary) response, are categorical
        cvSetReal1D( var_type, var_count, CV_VAR_CATEGORICAL );
        cvSetReal1D( var_type, var_count+1, CV_VAR_CATEGORICAL );

        // 3. train classifier
        printf( "Training the classifier (may take a few minutes)...\n");
        boost.train( new_data, CV_ROW_SAMPLE, new_responses, 0, 0, var_type, 0,
            CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0 ));
        cvReleaseMat( &new_data );
        cvReleaseMat( &new_responses );
        printf("\n");
    }

    temp_sample = cvCreateMat( 1, var_count + 1, CV_32F );
    weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F );

    // compute prediction error on train and test data
    for( i = 0; i < nsamples_all; i++ )
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        double r;
        CvMat sample;
        cvGetRow( data, &sample, i );
        for( k = 0; k < var_count; k++ )
            temp_sample->data.fl[k] = sample.data.fl[k];

        // try every class indicator; the class with the largest sum of
        // weak responses wins
        for( j = 0; j < class_count; j++ )
        {
            temp_sample->data.fl[var_count] = (float)j;
            boost.predict( temp_sample, 0, weak_responses );
            double sum = cvSum( weak_responses ).val[0];
            if( max_sum < sum )
            {
                max_sum = sum;
                best_class = j + 'A';
            }
        }

        r = fabs(best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;

        if( i < ntrain_samples )
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= (double)(nsamples_all-ntrain_samples);
    train_hr /= (double)ntrain_samples;
    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
            train_hr*100., test_hr*100. );

    printf( "Number of trees: %d\n", boost.get_weak_predictors()->total );

    // Save classifier to file if needed
    if( filename_to_save )
        boost.save( filename_to_save );

    cvReleaseMat( &temp_sample );
    cvReleaseMat( &weak_responses );
    cvReleaseMat( &var_type );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
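A minimal driver for build_boost_classifier might look like this (file paths are placeholders; a real program would take them from the command line):

// Sketch of a driver for build_boost_classifier(); paths are placeholders.
int main( int argc, char* argv[] )
{
    char* data_filename = (char*)"./letter-recognition.data";
    char* filename_to_save = (char*)"./letter_boost.xml";
    char* filename_to_load = 0;   // point at a saved model to skip training
    return build_boost_classifier( data_filename, filename_to_save, filename_to_load );
}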
int main()
{
    const int train_sample_count = 300;
    bool is_regression = false;

    const char* filename = "data/waveform.data";
    int response_idx = 21;

    CvMLData data;
    CvTrainTestSplit spl( train_sample_count );
    if( data.read_csv(filename) != 0 )
    {
        printf("couldn't read %s\n", filename);
        exit(1);
    }

    data.set_response_idx(response_idx);
    data.change_var_type(response_idx, CV_VAR_CATEGORICAL);
    data.set_train_test_split( &spl );

    const CvMat* values = data.get_values();
    const CvMat* response = data.get_responses();
    const CvMat* missing = data.get_missing();
    const CvMat* var_types = data.get_var_types();
    const CvMat* train_sidx = data.get_train_sample_idx();
    const CvMat* var_idx = data.get_var_idx();
    CvMat* response_map;
    // cv_preprocess_categories is a user-defined helper (not OpenCV API)
    CvMat* ordered_response = cv_preprocess_categories(response, var_idx, response->rows,
                                                       &response_map, NULL);
    int num_classes = response_map->cols;

    printf("======DTREE=====\n");
    CvDTree dtree;
    CvDTreeParams cvd_params( 10, 1, 0, false, 16, 0, false, false, 0 );
    dtree.train( &data, cvd_params );
    print_result( dtree.calc_error( &data, CV_TRAIN_ERROR ),
                  dtree.calc_error( &data, CV_TEST_ERROR ),
                  dtree.get_var_importance() );

#if 0
    /* boosted trees are only implemented for two classes */
    printf("======BOOST=====\n");
    CvBoost boost;
    boost.train( &data, CvBoostParams(CvBoost::DISCRETE, 100, 0.95, 2, false, 0) );
    print_result( boost.calc_error( &data, CV_TRAIN_ERROR ),
                  boost.calc_error( &data, CV_TEST_ERROR ), 0 );
#endif

    printf("======RTREES=====\n");
    CvRTrees rtrees;
    rtrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ) );
    print_result( rtrees.calc_error( &data, CV_TRAIN_ERROR ),
                  rtrees.calc_error( &data, CV_TEST_ERROR ),
                  rtrees.get_var_importance() );

    printf("======ERTREES=====\n");
    CvERTrees ertrees;
    ertrees.train( &data, CvRTParams( 10, 2, 0, false, 16, 0, true, 0, 100, 0, CV_TERMCRIT_ITER ) );
    print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR ),
                  ertrees.calc_error( &data, CV_TEST_ERROR ),
                  ertrees.get_var_importance() );

    printf("======GBTREES=====\n");
    CvGBTrees gbtrees;
    CvGBTreesParams gbparams;
    gbparams.loss_function_type = CvGBTrees::DEVIANCE_LOSS; // classification, not regression
    gbtrees.train( &data, gbparams );
    //gbt_print_error(&gbtrees, values, response, response_idx, train_sidx);
    print_result( gbtrees.calc_error( &data, CV_TRAIN_ERROR ),
                  gbtrees.calc_error( &data, CV_TEST_ERROR ), 0 );

    printf("======KNEAREST=====\n");
    CvKNearest knearest;
    //bool CvKNearest::train( const Mat& _train_data, const Mat& _responses,
    //                        const Mat& _sample_idx, bool _is_regression,
    //                        int _max_k, bool _update_base )
    bool is_classifier = var_types->data.ptr[var_types->cols-1] == CV_VAR_CATEGORICAL;
    assert(is_classifier);
    int max_k = 10;
    knearest.train(values, response, train_sidx, is_regression, max_k, false);

    CvMat* new_response = cvCreateMat(response->rows, 1, values->type);
    //print_types();
    knearest.find_nearest(values, max_k, new_response, 0, 0, 0);

    print_result( knearest_calc_error(values, response, new_response, train_sidx,
                                      is_regression, CV_TRAIN_ERROR),
                  knearest_calc_error(values, response, new_response, train_sidx,
                                      is_regression, CV_TEST_ERROR), 0 );

    printf("======== RBF SVM =======\n");
    //printf("indexes: %d / %d, responses: %d\n", train_sidx->cols, var_idx->cols, values->rows);
    CvMySVM svm1;
    CvSVMParams params1 = CvSVMParams(CvSVM::C_SVC, CvSVM::RBF,
                                      /*degree*/0, /*gamma*/1, /*coef0*/0, /*C*/1,
                                      /*nu*/0, /*p*/0, /*class_weights*/0,
                                      cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON));
    //svm1.train(values, response, train_sidx, var_idx, params1);
    svm1.train_auto(values, response, var_idx, train_sidx, params1);
    svm_print_error(&svm1, values, response, response_idx, train_sidx);

    printf("======== Linear SVM =======\n");
    CvMySVM svm2;
    CvSVMParams params2 = CvSVMParams(CvSVM::C_SVC, CvSVM::LINEAR,
                                      /*degree*/0, /*gamma*/1, /*coef0*/0, /*C*/1,
                                      /*nu*/0, /*p*/0, /*class_weights*/0,
                                      cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON));
    //svm2.train(values, response, train_sidx, var_idx, params2);
    svm2.train_auto(values, response, var_idx, train_sidx, params2);
    svm_print_error(&svm2, values, response, response_idx, train_sidx);

    printf("======NEURAL NETWORK=====\n");
    int num_layers = 3;
    // layer sizes: input = number of features, then two layers of num_classes neurons;
    // cvmSetI is a user-defined helper for setting integer matrix elements
    CvMat layers = cvMat(1, num_layers, CV_32SC1,
                         calloc(num_layers, sizeof(int)));
    cvmSetI(&layers, 0, 0, values->cols-1);
    cvmSetI(&layers, 0, 1, num_classes);
    cvmSetI(&layers, 0, 2, num_classes);
    CvANN_MLP ann(&layers, CvANN_MLP::SIGMOID_SYM, 0.0, 0.0);
    CvANN_MLP_TrainParams ann_params;
    //ann_params.train_method = CvANN_MLP_TrainParams::BACKPROP;
    // cvmat_make_boolean_class_columns / cvmat_remove_column are user-defined helpers
    CvMat ann_response = cvmat_make_boolean_class_columns(response, num_classes);
    CvMat values2 = cvmat_remove_column(values, response_idx);
    ann.train(&values2, &ann_response, NULL, train_sidx, ann_params, 0x0000);
    //ann.train(values, &ann_response, NULL, train_sidx, ann_params, 0x0000);
    ann_print_error(&ann, values, num_classes, &ann_response, response, response_idx, train_sidx);

#if 0 /* slow */
    printf("======== Polynomial SVM =======\n");
    //printf("indexes: %d / %d, responses: %d\n", train_sidx->cols, var_idx->cols, values->rows);
    CvMySVM svm3;
    CvSVMParams params3 = CvSVMParams(CvSVM::C_SVC, CvSVM::POLY,
                                      /*degree*/2, /*gamma*/1, /*coef0*/0, /*C*/1,
                                      /*nu*/0, /*p*/0, /*class_weights*/0,
                                      cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON));
    //svm3.train(values, response, train_sidx, var_idx, params3);
    svm3.train_auto(values, response, var_idx, train_sidx, params3);
    svm_print_error(&svm3, values, response, response_idx, train_sidx);
#endif

    return 0;
}
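knearest_calc_error, like the other *_print_error helpers here, is user code rather than OpenCV API. One plausible sketch of the classification branch, assuming train_sidx is a 1 x n CV_32S row vector of training-sample indices (as returned by CvMLData::get_train_sample_idx):

#include <cassert>
#include <vector>
#include <opencv2/core/core_c.h>
#include <opencv2/ml/ml.hpp>

// Plausible sketch of knearest_calc_error (classification case only);
// not the original. Returns the misclassification percentage over the
// train or test subset, with train membership given by train_sidx.
static float knearest_calc_error( const CvMat* values, const CvMat* response,
                                  const CvMat* new_response,
                                  const CvMat* train_sidx,
                                  bool is_regression, int type )
{
    assert( !is_regression ); // only the classification case is sketched
    std::vector<bool> is_train( values->rows, false );
    for( int i = 0; i < train_sidx->cols; i++ )
        is_train[ train_sidx->data.i[i] ] = true;

    int errors = 0, count = 0;
    for( int i = 0; i < values->rows; i++ )
    {
        // keep only the rows belonging to the requested subset
        if( (type == CV_TRAIN_ERROR) != is_train[i] )
            continue;
        count++;
        if( cvGetReal1D(response, i) != cvGetReal1D(new_response, i) )
            errors++;
    }
    return count ? 100.0f * errors / count : 0.0f;
}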