int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
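	// svm_check_parameter returns NULL on success, or a human-readable error string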
	error_msg = svm_check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		exit(1);
	}

	if(cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		model = svm_train(&prob,&param);
		svm_save_model(model_file_name,model);
		svm_destroy_model(model);
	}
	svm_destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);

	return 0;
}
void TrainableOneClassSvmClassifier::train() {
	problem = move(createProblem());
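	// a non-null return from svm_check_parameter describes why the parameters are invalid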
	const char* message = svm_check_parameter(problem.get(), param.get());
	if (message != 0)
		throw invalid_argument(string("invalid SVM parameters: ") + message);
	model.reset(svm_train(problem.get(), param.get()));
	updateSvmParameters();
}
Example #3
svm_classifier_t *svm_finish_classifier(svm_classifier_t *svm) {
    int i, j, nr_fold=5, total_correct, best_correct=0;
    int best_c=0, best_g=0;	/* base-2 exponents; overwritten by the grid search below */
    const char *error_msg;
    double *result = (double *) rs_malloc(sizeof(double)*svm->problem.l,"cross validation result");

    rs_msg("Building SVM classifier...");

    if (svm->finished)
	rs_warning("SVM classifier is already trained!");

    error_msg = svm_check_parameter(&(svm->problem),&(svm->param));
    
    if(error_msg) 
	rs_error("%s",error_msg);
    
    /* Scaling */
    _create_scaling(svm->problem,svm->feature_dim,&(svm->max),&(svm->min));
    
    for (i=0;i<svm->problem.l;i++) 
	_scale_instance(&(svm->problem.x[i]),svm->feature_dim,svm->max,svm->min);
    
    /* Cross-validation to choose C and gamma for the RBF kernel */
    if (svm->param.kernel_type == RBF) {
	svm->param.probability = 0;
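	/* probability estimation is disabled during the grid search and re-enabled
	   for the final training; C[] and G[] are external tables of base-2 exponents */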
	for (i=0;i<C_G_ITER;i++) {
	    total_correct=0;
	    svm->param.C=pow(2,C[i]);
	    svm->param.gamma=pow(2,G[i]);
	    svm_cross_validation(&(svm->problem),&(svm->param),nr_fold,result);
	    for(j=0;j<svm->problem.l;j++) {
		if(result[j] == svm->problem.y[j])
		    ++total_correct;
	    }
	    if (total_correct > best_correct) {
		best_correct=total_correct;
		best_c=C[i];
		best_g=G[i];
	    }
	    rs_msg("C-G-Selektion-Iteration # %d: tried c=%g and g=%g => CV rate is %g; current best c=%g and g=%g with CV rate %g",i+1,pow(2,C[i]),pow(2,G[i]),total_correct*100.0/svm->problem.l,pow(2,best_c),pow(2,best_g),best_correct*100.0/svm->problem.l);
	}
	
	/* Training */
	svm->param.C=pow(2,best_c);
	svm->param.gamma=pow(2,best_g);
	svm->param.probability = 1;
    }
    
    svm->model=svm_train(&(svm->problem),&(svm->param));
    svm->finished=1;

	// @begin_add_johannes
	rs_free (result);
	// @end_add_johannes

    return svm;
}
// Calls LibSVM function svm_train for simplicity.
int SupportVectorMachine::Train() {

  // Validate parameters first; svm_check_parameter returns NULL on success.
  const char *svm_check_param = svm_check_parameter(&svm_problem_,
    &svm_parameter_);
  if (svm_check_param != NULL) {
    fprintf(stderr, "ERROR: %s\n", svm_check_param);
    return -1;
  }

  // Calls svm_train to train an SVM model.
  struct svm_model *curr_svm_model = svm_train(&svm_problem_, &svm_parameter_);
  // Shallow copy: the pointers inside svm_model_ still reference memory owned
  // by curr_svm_model.
  memcpy(&svm_model_, curr_svm_model, sizeof(struct svm_model));

  return 0;
}
bool SVM::validateProblemAndParameters(){
	//Check the parameters match the problem
	const char *errorMsg = svm_check_parameter(&prob,&param);

	if( errorMsg ){
        errorLog << "validateProblemAndParameters() - Parameters do not match problem!" << endl;
		return false;
	}

	return true;
}
svm_model * SupportVectorMachine::get_svm_model(std::vector<Abalone> &learning_Abalones)
{
	svm_problem SVM_Problem;
	SVM_Problem.l = learning_Abalones.size();
	SVM_Problem.y = Abalone::get_target_values(learning_Abalones);
	svm_node **x = new svm_node*[learning_Abalones.size()];
	for (int i = 0; i < learning_Abalones.size(); i++)
	{
		x[i] = new svm_node[9];
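		// 8 feature nodes plus one terminating sentinel node (index = -1)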
		svm_node node1; node1.index = 1; node1.value = learning_Abalones[i].get_Sex();
		x[i][0] = node1;
		svm_node node2; node2.index = 2; node2.value = learning_Abalones[i].get_Diameter();
		x[i][1] = node2;
		svm_node node3; node3.index = 3; node3.value = learning_Abalones[i].get_Height();
		x[i][2] = node3;
		svm_node node4; node4.index = 4; node4.value = learning_Abalones[i].get_Length();
		x[i][3] = node4;
		svm_node node5; node5.index = 5; node5.value = learning_Abalones[i].get_Shell_weight();
		x[i][4] = node5;
		svm_node node6; node6.index = 6; node6.value = learning_Abalones[i].get_Shucked_weight();
		x[i][5] = node6;
		svm_node node7; node7.index = 7; node7.value = learning_Abalones[i].get_Viscera_weight();
		x[i][6] = node7;
		svm_node node8; node8.index = 8; node8.value = learning_Abalones[i].get_Whole_weight();
		x[i][7] = node8;
		svm_node node9; node9.index = -1; node9.value = 0; /* sentinel terminating the instance; value is ignored */
		x[i][8] = node9;
	}
	SVM_Problem.x = x;
	svm_parameter SVM_Parameter;
	SVM_Parameter.C = 1;
	SVM_Parameter.svm_type = C_SVC;
	SVM_Parameter.kernel_type = LINEAR;
	SVM_Parameter.degree = 3; /* for poly */
	SVM_Parameter.gamma = 2; /* for poly/rbf/sigmoid */
	SVM_Parameter.coef0 = 1; /* for poly/sigmoid */
	SVM_Parameter.cache_size = 64;
	SVM_Parameter.eps = 0.001;
	SVM_Parameter.nr_weight = 0;
	SVM_Parameter.weight_label = NULL; /* no per-class weights */
	SVM_Parameter.weight = NULL;
	SVM_Parameter.nu = 0.5;    /* unused by C_SVC but must hold a valid value */
	SVM_Parameter.p = 0.1;
	SVM_Parameter.shrinking = 1;
	SVM_Parameter.probability = 0;
	const char *error_msg = svm_check_parameter(&SVM_Problem, &SVM_Parameter);
	if (error_msg)
	{
		fprintf(stderr, "ERROR: %s\n", error_msg);
		return NULL;
	}
	 
	svm_model *model = svm_train(&SVM_Problem, &SVM_Parameter);

	/*for (int i = 0; i < learning_Abalones.size(); i++)
	{
		delete[] x[i];
	}
	delete[] x;
	delete SVM_Problem.y;*/

	return model;
}
Example #7
svm_model* LibSVMRunner::load_model_from_config(SVMConfiguration& config,
		svm_parameter* param) {

	const char *error_msg;
	error_msg = svm_check_parameter(&prob, param,config.log);

	if (error_msg) {
		LOG(config.log, LogLevel::ERR_LEVEL, "ERROR: " + to_string(error_msg))
		return 0;
	}

	model = Malloc(svm_model, 1);

	model->l =  config.getSVCount(); // number of support vectors
	model->nr_class = config.nr_class;
	model->param = *param;

	model->sv_coef = (double **) malloc(model->nr_class * sizeof(double*));
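	// libsvm uses nr_class - 1 rows of sv_coef; the extra row allocated above stays unused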
	for (int i = 0; i < config.nr_class - 1; i++) {
		model->sv_coef[i] = (double *) malloc(config.getSVCount() * sizeof(double));
		std::copy(config.alpha_y.begin(), config.alpha_y.end(), model->sv_coef[i]);
	}

    model->SV = armatlib(arma::mat(config.support_vectors.t()));
	// FIXME: Why below is not working?
    //model->SV = ArmaSpMatToSvmNode(config.support_vectors);

	model->rho = (double *) malloc(
			config.nr_class * (config.nr_class - 1) / 2 * sizeof(double));

	// libsvm stores rho = -b, so flip the sign
	double local_rho = -config.b;
	
	if(config.nr_class != 2) {
		throw std::invalid_argument( "Code is not implemented for more than 2 classes right now");
	}

	memcpy(model->rho, &local_rho,
			config.nr_class * (config.nr_class - 1) / 2 * sizeof(double));

	model->free_sv = 1;


	if (config.svm_type < 2) {
		model->label = (int *) malloc(config.nr_class * sizeof(int));
		model->nSV = (int *) malloc(config.nr_class * sizeof(int));
		memcpy(model->label, config.label, config.nr_class * sizeof(int));
		memcpy(model->nSV, config.nSV, config.nr_class * sizeof(int));
	}

	return model;
}
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
#ifdef CUSTOM_SOLVER
    if (param.svm_type == ONE_CLASS)
    {
        param.strong_footlier_indexes = filter_strong_footliers(input_file_name);
    }
#endif
	error_msg = svm_check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		exit(1);
	}

	if(cross_validation)
	{
		if(param.svm_type == R2 || param.svm_type == R2q)
			fprintf(stderr, "\"R^2\" cannot do cross validation.\n");
		else
			do_cross_validation();
	}
	else
	{
		model = svm_train(&prob,&param);
		if(param.svm_type == R2 || param.svm_type == R2q)
			fprintf(stderr, "\"R^2\" does not generate model.\n");
		else if(svm_save_model(model_file_name,model))
		{
			fprintf(stderr, "can't save model to file %s\n", model_file_name);
			exit(1);
		}
		svm_free_and_destroy_model(&model);
	}
	svm_destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}
Example #9
void* jpcnn_create_predictor_from_trainer(void* trainerHandle) {
  SLibSvmTrainingInfo* trainer = (SLibSvmTrainingInfo*)(trainerHandle);
  SLibSvmProblem* problem = create_svm_problem_from_training_info(trainer);
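  // create_svm_problem_from_training_info packs the trainer's accumulated samples
  // into libsvm problem/parameter structs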
  const char* parameterCheckError = svm_check_parameter(problem->svmProblem, problem->svmParameters);
  if (parameterCheckError != NULL) {
    fprintf(stderr, "libsvm parameter check error: %s\n", parameterCheckError);
    destroy_svm_problem(problem);
    return NULL;
  }
  struct svm_model* model = svm_train(problem->svmProblem, problem->svmParameters);
  SPredictorInfo* result = (SPredictorInfo*)(malloc(sizeof(SPredictorInfo)));
  result->model = model;
  result->problem = problem;
  return result;
}
Example #10
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = svm_check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}

	if(cross_validation)
	{
		if (nr_fold <= 10)
		{
			do_cross_validation();
		}
		else
		{
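			// a fold count above 10 requests binary_class_cross_validation with (nr_fold - 10) folds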
			double cv;
			nr_fold = nr_fold - 10;
			cv =  binary_class_cross_validation(&prob, &param, nr_fold);
			printf("Cross Validation = %g%%\n",100.0*cv);
		}
	}
	else
	{
		model = svm_train(&prob,&param);
		if(svm_save_model(model_file_name,model))
		{
			fprintf(stderr, "can't save model to file %s\n", model_file_name);
			exit(1);
		}
		svm_free_and_destroy_model(&model);
	}
	svm_destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}
Example #11
static VALUE cModel_class_train(VALUE obj,VALUE problem,VALUE parameter) {
  const struct svm_problem *prob;
  const struct svm_parameter *param;
  struct svm_model *model;
  const char *check_error;

  Data_Get_Struct(problem, struct svm_problem, prob);
  Data_Get_Struct(parameter, struct svm_parameter, param);
  
  check_error = svm_check_parameter(prob, param);
  if(check_error != NULL) {
    rb_raise(rb_eArgError, "Parameters not valid for Problem: '%s'", check_error);
  }
  model = svm_train(prob,param);

  return Data_Wrap_Struct(cModel, 0, model_free, model);
}
int SVM::train() {
	if (problem.y == NULL || problem.x == NULL)
		return -1;
	const char* error_msg = svm_check_parameter(&problem, &param);
	if (error_msg) {
		std::cout << "ERROR: " << error_msg << std::endl;
		exit(-1);
	}
	model = svm_train(&problem, &param);
	/*for (int i = 0; i < 100000; i++)
	  if (sin(i) + cos(i) > 1.414)
	  std::cout << ".";
	  */
	main_equation = new Equation();
	svm_model_visualization(model, main_equation);
	svm_free_and_destroy_model(&model);
	return 0;
}
Example #13
bool CmySvmArth::Train( char* path)
{
	const char *error_msg;
	error_msg = svm_check_parameter(&prob,&param);
	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		free(prob.y);
		free(prob.x);
		free(x_space);
		free(line);
		line = NULL;
		x_space = NULL;
		return false;
	}
	if(cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		model = svm_train(&prob,&param);
		if(path!=NULL&&svm_save_model(path,model))
		{
			fprintf(stderr, "can't save model to file %s\n", path);
			return false;
		}
		if(path!=NULL)
		{
			svm_free_and_destroy_model(&model);
		}
	}
	svm_destroy_param(&param);
	free(prob.y);
	free(prob.x);
	if(path!=NULL)
	{
		free(x_space);
		free(line);
		line = NULL;
		x_space = NULL;
	}
	return true;
}
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = svm_check_parameter(&prob,&param);
	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}

	if(cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		model = svm_train(&prob,&param);
		if(svm_save_model(model_file_name,model))
		{
			fprintf(stderr, "can't save model to file %s\n", model_file_name);
			exit(1);
		}
		svm_free_and_destroy_model(&model);
	}
	svm_destroy_param(&param);
	free(prob.y);
#ifdef _DENSE_REP
	for (int i = 0; i < prob.l; ++i)
		free((prob.x+i)->values);
#else
	free(x_space);
#endif
	free(prob.x);
	free(line);

	return 0;
}
Example #15
int SVMTrainModel::train(double &RecRate, std::vector<int> &ConfusionTable)
{ 

  if((!have_input_file_name) || (!have_model_file_name))
  {
    fprintf(stderr,"ERROR: Set Input and Model files first!\n");
    exit(1);
  }

  const char *error_msg;
  
  readProblem(input_file_name);
  error_msg = svm_check_parameter(&prob,&param);
  
  if(error_msg)
  {
    fprintf(stderr,"ERROR: %s\n",error_msg);
    exit(1);
  }
  
  if(cross_validation)
  {
    do_cross_validation(RecRate,ConfusionTable);
  }
  else
  {
    model = svm_train(&prob,&param);
    if(svm_save_model(model_file_name,model))
    {
      fprintf(stderr, "can't save model to file %s\n", model_file_name);
      exit(1);
    }
    svm_free_and_destroy_model(&model);
  }
  svm_destroy_param(&param);
  free(prob.y);
  free(prob.x);
  free(x_space);
  free(line);
  
  return 0;
}
Example #16
void SVM::train(char* pInputSampleFileName, char* OutputModelFilename, double &dRetTrainError, double &dRetCrossValError)
{
	struct svm_parameter strSvmParameters;
	struct svm_problem strSvmProblem;
	struct svm_model *pstrSvmModel;
	const char *error_msg;
	double dCrossValError = -1;
	double dTrainError = -1;

	//set parameters
	this->setParameters(strSvmParameters);

	//read sample file
	this->read_problem(pInputSampleFileName, strSvmProblem, strSvmParameters);

	//check parameters
	error_msg = svm_check_parameter(&strSvmProblem, &strSvmParameters);
	if (error_msg)
	{
		cout << "ERROR: " << error_msg << std::endl;
		return;
	}

	//train model
	pstrSvmModel = svm_train(&strSvmProblem, &strSvmParameters);

	//do cross validation check
	dCrossValError = this->crossValidationSamples(strSvmProblem, strSvmParameters, 5);

	//save trained model
	svm_save_model(OutputModelFilename, pstrSvmModel);

	//test trained model with training set -> train error
	cout << "test model " << OutputModelFilename << " with the training set " << pInputSampleFileName << std::endl;
	this->test(pInputSampleFileName, OutputModelFilename, dTrainError);

	//clean up
	svm_destroy_model(pstrSvmModel);
	svm_destroy_param(&strSvmParameters);
	free(strSvmProblem.y);
	free(strSvmProblem.x);

	dRetTrainError = dTrainError;
	dRetCrossValError = dCrossValError;
}
Example #17
int svmtrain(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = svm_check_parameter(&prob,&param);

	if(error_msg)
	{
		LOGD("ERROR: %s\n",error_msg);
		exit(1);
	}

	if(cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		modelt = svm_train(&prob,&param);
		if(svm_save_model(model_file_name,modelt))
		{
			LOGD("can't save model to file %s\n", model_file_name);
			exit(1);
		}
		svm_free_and_destroy_model(&modelt);
	}
	svm_destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}
		void SvmOneVsAll::train()
		{
			scaling = buildScalingSetting(histgrams_by_name);

			models_by_name.clear();
			for (const auto& hist_positive: histgrams_by_name)
			{
				list<vector<LibSVM::NodeArray::Classified> > data_set, data_set_positive, data_set_rest;
				for (const auto& hist : histgrams_by_name)
				{
					if (hist_positive.first == hist.first) {
						auto d = buildClassfiedData(hist.second, 1);
						data_set.push_back(d);
						data_set_positive.push_back(d);
					}
					else {
						auto d = buildClassfiedData(hist.second, 0);
						data_set.push_back(d);
						data_set_rest.push_back(d);
					}
				}

				auto data_set_merged = LibSVM::mergeClassified(data_set);
				LibSVM::scale(scaling, data_set_merged);

				LibSVM::Problem prob(data_set_merged);

				const char* param_error_message = svm_check_parameter(&prob, param);
				if (param_error_message) {
					OmpStream(cerr) << "Parameter is wrong: " << param_error_message << endl;
					continue;
				}

				LibSVM::Model model(svm_train(&prob, param));
				models_by_name[hist_positive.first] = make_tuple(model, prob);
			}
		}
Example #19
void svmtrain (double *x, int *r, int *c, 
	       double *y,
	       int    *rowindex, int *colindex,
	       int    *svm_type,
	       int    *kernel_type,
	       int    *degree,
	       double *gamma,
	       double *coef0,
	       double *cost,
	       double *nu,
	       int    *weightlabels,
	       double *weights,
	       int    *nweights,
	       double *cache,
	       double *tolerance,
	       double *epsilon,
	       int    *shrinking,
	       int    *cross,
	       int    *sparse,
	       int    *probability,
	       
	       int    *nclasses,
	       int    *nr,
	       int    *index,
	       int    *labels,
	       int    *nSV,
	       double *rho,
	       double *coefs,
	       double *sigma,
	       double *probA,
	       double *probB,

	       double *cresults,
	       double *ctotal1,
	       double *ctotal2,
	       char   **error)
{
    struct svm_parameter par;
    struct svm_problem   prob;
    struct svm_model    *model = NULL;
    int i, ii;
    const char* s;
    
    /* set parameters */
    par.svm_type    = *svm_type;
    par.kernel_type = *kernel_type;
    par.degree      = *degree;
    par.gamma       = *gamma;
    par.coef0       = *coef0;
    par.cache_size  = *cache;
    par.eps         = *tolerance;
    par.C           = *cost;
    par.nu          = *nu;
    par.nr_weight   = *nweights;
    if (par.nr_weight > 0) {
	par.weight      = (double *) malloc (sizeof(double) * par.nr_weight);
	memcpy(par.weight, weights, par.nr_weight * sizeof(double));
	par.weight_label = (int *) malloc (sizeof(int) * par.nr_weight);
	memcpy(par.weight_label, weightlabels, par.nr_weight * sizeof(int));
    }
    par.p           = *epsilon;
    par.shrinking   = *shrinking;
    par.probability = *probability;

    /* set problem */
    prob.l = *r;
    prob.y = y;
    
    if (*sparse > 0)
	prob.x = transsparse(x, *r, rowindex, colindex);
    else
	prob.x = sparsify(x, *r, *c);
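    /* sparsify()/transsparse() build libsvm's sparse svm_node rows from the input matrix */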
    
    /* check parameters & copy error message */
    s = svm_check_parameter(&prob, &par);
    if (s) {
	strcpy(*error, s);
    } else {

	/* call svm_train */
	model = svm_train(&prob, &par);
    
	/* set up return values */

	/*	for (ii = 0; ii < model->l; ii++)
	    for (i = 0; i < *r;	i++)
	    if (prob.x[i] == model->SV[ii]) index[ii] = i+1; */
	svm_get_sv_indices(model, index);
	
	*nr  = model->l;
	*nclasses = model->nr_class;
	memcpy (rho, model->rho, *nclasses * (*nclasses - 1)/2 * sizeof(double));

	if (*probability && par.svm_type != ONE_CLASS) {
	  if (par.svm_type == EPSILON_SVR || par.svm_type == NU_SVR)
	    *sigma = svm_get_svr_probability(model);
	  else {
	    memcpy(probA, model->probA, 
		    *nclasses * (*nclasses - 1)/2 * sizeof(double));
	    memcpy(probB, model->probB, 
		    *nclasses * (*nclasses - 1)/2 * sizeof(double));
	  }
	}

	for (i = 0; i < *nclasses-1; i++)
	    memcpy (coefs + i * *nr, model->sv_coef[i],  *nr * sizeof (double));
	
	if (*svm_type < 2) {
	    memcpy (labels, model->label, *nclasses * sizeof(int));
	    memcpy (nSV, model->nSV, *nclasses * sizeof(int));
	}
	
	/* Perform cross-validation, if requested */
	if (*cross > 0)
	    do_cross_validation (&prob, &par, *cross, cresults,
				 ctotal1, ctotal2);

	/* clean up memory */
	svm_free_and_destroy_model(&model);
    }
    
    /* clean up memory */
    if (par.nr_weight > 0) {
	free(par.weight);
	free(par.weight_label);
    }
    
    for (i = 0; i < *r; i++) free (prob.x[i]);
    free (prob.x);
}
    void SVMClassifier::train()
    {
        if(class_data.size() == 0){
            printf("SVMClassifier::train() -- No training data available! Doing nothing.\n");
            return;
        }
        
        int n_classes = class_data.size();
        
        //Count the training data
        int n_data = 0;
        int dims = class_data.begin()->second[0].size();
        for(ClassMap::iterator iter = class_data.begin(); iter != class_data.end(); iter++){
            CPointList cpl = iter->second;
            if(cpl.size() == 1)
                n_data += 2;    //There's a bug in libSVM for classes with only 1 data point, so we will duplicate them later
            else
                n_data += cpl.size();
        }
        
        //Allocate space for data in an svm_problem structure
        svm_data.l = n_data;
        svm_data.y = new double[n_data];
        svm_data.x = new svm_node*[n_data]; 
        for(int i=0; i<n_data; i++)
            svm_data.x[i] = new svm_node[dims+1];
        
        //Create maps between string labels and int labels
        label_str_to_int.clear();
        label_int_to_str.clear();
        int label_n = 0;
        for(ClassMap::iterator iter = class_data.begin(); iter != class_data.end(); iter++){
            string cname = iter->first;
            label_str_to_int[cname] = label_n;
            label_int_to_str[label_n] = cname;
            //cout << "MAP: " << label_n << "   " << cname << "   Size: " << iter->second.size() << endl;
            ++label_n;
        }
                
        //Find the range of the data in each dim and calc the scaling factors to scale from 0 to 1
        scaling_factors = new double*[dims];
        for(int i=0; i<dims; i++)
            scaling_factors[i] = new double[2];
            
        //Scale each dimension separately
        for(int j=0; j<dims; j++){
            //First find the min, max, and scaling factor
            double minval = INFINITY;
            double maxval = -INFINITY;
            for(ClassMap::iterator iter = class_data.begin(); iter != class_data.end(); iter++){
                CPointList cpl = iter->second;
                for(size_t i=0; i<cpl.size(); i++){
                    if(cpl[i][j] < minval) 
                        minval = cpl[i][j];
                    if(cpl[i][j] > maxval) 
                        maxval = cpl[i][j];
                }
            }
            double factor = maxval-minval;
            double offset = minval;
            
            //Do the scaling and save the scaling factor and offset
            for(ClassMap::iterator iter = class_data.begin(); iter != class_data.end(); iter++){
                for(size_t i=0; i<iter->second.size(); i++){
                    iter->second[i][j] = (iter->second[i][j] - offset) / factor;
                }
            }
            scaling_factors[j][0] = offset;
            scaling_factors[j][1] = factor;
        }
        
        //Put the training data into the svm_problem
        int n = 0;
        for(ClassMap::iterator iter = class_data.begin(); iter != class_data.end(); iter++){
            string cname = iter->first;
            CPointList cpl = iter->second;
            
            //Account for bug in libSVM with classes with only 1 data point by duplicating it.
            if(cpl.size() == 1){
                svm_data.y[n] = label_str_to_int[cname];
                svm_data.y[n+1] = label_str_to_int[cname];
                for(int j=0; j<dims; j++){
                    svm_data.x[n][j].index = j;
                    svm_data.x[n][j].value = cpl[0][j] + 0.001;
                    svm_data.x[n+1][j].index = j;
                    svm_data.x[n+1][j].value = cpl[0][j] + 0.001;
                }
                svm_data.x[n][dims].index = -1;
                svm_data.x[n+1][dims].index = -1;
                n = n + 2;
            }
            else{
                for(size_t i=0; i<cpl.size(); i++){
                    svm_data.y[n] = label_str_to_int[cname];
                    for(int j=0; j<dims; j++){
                        svm_data.x[n][j].index = j;
                        svm_data.x[n][j].value = cpl[i][j];
                    }
                    svm_data.x[n][dims].index = -1;
                    n = n + 1;
                }
            }
        } 
        
        //Set the training params
        svm_parameter params;
        params.svm_type = C_SVC;
        params.kernel_type = RBF;
        params.cache_size = 100.0;  
        params.gamma = 1.0;
        params.C = 1.0;
        params.eps = 0.001;
        params.shrinking = 1;
        params.probability = 0;
        params.degree = 0;
        params.nr_weight = 0;
        //params.weight_label = 
        //params.weight = 
        
        const char *err_str = svm_check_parameter(&svm_data, &params);
        if(err_str){
            printf("SVMClassifier::train() -- Bad SVM parameters!\n");
            printf("%s\n",err_str);
            return;
        }
        
        //Grid Search for best C and gamma params
        int n_folds = min(10, n_data);  //Make sure there at least as many points as folds
        double *resp = new double[n_data];
        double best_accy = 0.0;
        double best_g = 0.0;
        double best_c = 0.0;
        
        //First, do a coarse search
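        // exponent ranges follow the libsvm grid-search recipe: C = 2^-5..2^15, gamma = 2^-15..2^3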
        for(double c = -5.0; c <= 15.0; c += 2.0){
            for(double g = 3.0; g >= -15.0; g -= 2.0){    
                params.gamma = pow(2,g);
                params.C = pow(2,c);
                
                svm_cross_validation(&svm_data, &params, n_folds, resp);
                
                //Figure out the accuracy using these params
                int correct = 0;
                for(int i=0; i<n_data; i++){
                    if(resp[i] == svm_data.y[i])
                        ++correct;
                }
                double accy = double(correct) / double(n_data);
                if(accy > best_accy){
                    best_accy = accy;
                    best_g = params.gamma;
                    best_c = params.C;
                }
            }
        }
        
        //Now do a finer grid search based on coarse results   
        double start_c = best_c - 1.0;
        double end_c = best_c + 1.0;
        double start_g = best_g + 1.0;
        double end_g = best_g - 1.0;
        for(double c = start_c; c <= end_c; c += 0.1){
            for(double g = start_g; g >= end_g; g -= 0.1){
                params.gamma = pow(2,g);
                params.C = pow(2,c);
                svm_cross_validation(&svm_data, &params, n_folds, resp);
                
                //Figure out the accuracy using these params
                int correct = 0;
                for(int i=0; i<n_data; i++){
                    if(resp[i] == svm_data.y[i])
                        ++correct;
                }
                double accy = double(correct) / double(n_data);
                if(accy > best_accy){
                    best_accy = accy;
                    best_g = params.gamma;
                    best_c = params.C;
                }
            }
        }

        // Set params to best found in grid search
        params.gamma = best_g;
        params.C = best_c;
    
        printf("BEST PARAMS  ncl: %i   c: %f   g: %f   accy: %f \n\n", n_classes, best_c, best_g, best_accy);
        
        //Train the SVM
        trained_model = svm_train(&svm_data, &params);
    }
void BagOfFeatures::trainSVM(int type = NU_SVC,
                            int kernel = RBF,
                            double degree = 0.05,
                            double gamma = 0.25,
                            double coef0 = 0.5,
                            double C = .05,
                            double cache = 300,
                            double eps = 0.000001,
                            double nu = 0.5,
                            int shrinking = 0,
                            int probability = 0,
                            int weight = 0)
{
    if(SVMModel != NULL)
    {
        svm_destroy_model(SVMModel);
        //svm_destroy_param(&SVMParam);
    }




    int i, j, k, l = -1;
    int totalData = 0;
    int size, length = dictionary->rows;
    int count;
    //Get the total number of training data
    for(i = 0; i < numClasses; i++)
        totalData += data[i].getTrainSize();

    // Set up the data
    struct svm_problem SVMProblem;
    SVMProblem.l = totalData;
    SVMProblem.y = new double [totalData];
    SVMProblem.x = new struct svm_node* [totalData];
    // Allocate memory
    //for(i = 0; i < totalData; i++)
    //{
    //    SVMProblem.x[i] = new struct svm_node [length+1];
    //}

    // For each class
    for(i = 0; i < numClasses; i++)
    {
        // Get the number of images
        size = data[i].getTrainSize();
        for(j = 0; j < size; j++)
        {
            l++;
            count = 0;
            for(k = 0; k < length; k++)
            {
                if(trainObject[i].histogramSet.histogram[j][k] != 0)
                    count++;
            }
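            // sparse row: one node per nonzero histogram bin plus the index = -1 sentinel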
            SVMProblem.x[l] = new struct svm_node [count+1];
            count = 0;
            for(k = 0; k < length; k++)
            {
                if(trainObject[i].histogramSet.histogram[j][k] != 0)
                {
                    SVMProblem.x[l][count].index = k;
                    SVMProblem.x[l][count].value = trainObject[i].histogramSet.histogram[j][k];
                    //cout << "(" << SVMProblem.x[l][count].index
                    //    << ", " << SVMProblem.x[l][count].value << ")" << endl;
                    count++;
                }
            }
            SVMProblem.x[l][count].index = -1;
            //cout << endl;
            //SVMProblem.x[l][count].value = -1;
            // Copy the histograms
            //for(k = 0; k < length; k++)
            //{
            //    SVMProblem.x[l][k].index = k;
            //    SVMProblem.x[l][k].value = trainObject[i].histogramSet.histogram[j][k];
            //}
            // End of the data
            //SVMProblem.x[l][length].index = -1;
            //SVMProblem.x[l][length].value = -1;
            //Attach the labels

            SVMProblem.y[l] = data[i].getLabel();
            //cout << "Label: " << SVMProblem.y[l] << endl;
        }
    }

    // Types
    SVMParam.svm_type = type;
    SVMParam.kernel_type = kernel;
    // Parameters
    SVMParam.degree = degree;
    SVMParam.gamma = gamma;
    SVMParam.coef0 = coef0;
    SVMParam.C = C;
    // For training only
    SVMParam.cache_size = cache;
    SVMParam.eps = eps;
    SVMParam.nu = nu;
    SVMParam.shrinking = shrinking;
    SVMParam.probability = probability;
    // Don't change the weights
    SVMParam.nr_weight = weight;


    double* target = new double [totalData];
    const char* error_msg = svm_check_parameter(&SVMProblem, &SVMParam);
    if(error_msg)
    {
        printf("ERROR: %s\n", error_msg);
        delete [] target;
        return;
    }
    svm_cross_validation(&SVMProblem, &SVMParam, 10, target);
    SVMModel = svm_train(&SVMProblem, &SVMParam);
    delete [] target;

    classifierType = LIBSVM_CLASSIFIER;

}
Example #22
    //train the svm using the parameters defined inside this method
    void ML2::trainData()
    {

        //file to store the svm model structure
	    string model_file_name1 = flowstatistics_train_name+"_model.csv";
        const char *model_file_name = model_file_name1.c_str();
        //file to read the data from
	    char input_file_name[1024] = "velocityArray.csv";
        //char input_file_name2[1024] = "data/flowstatistics_train_mu.csv";
	    const char *error_msg;

       //parameters of the svm
        /*
       "-s svm_type : set type of SVM (default 0)\n"
	    "	0 -- C-SVC		(multi-class classification)\n"
	    "	1 -- nu-SVC		(multi-class classification)\n"
	    "	2 -- one-class SVM\n"
	    "	3 -- epsilon-SVR	(regression)\n"
	    "	4 -- nu-SVR		(regression)\n"
	    "-t kernel_type : set type of kernel function (default 2)\n"
	    "	0 -- linear: u'*v\n"
	    "	1 -- polynomial: (gamma*u'*v + coef0)^degree\n"
	    "	2 -- radial basis function: exp(-gamma*|u-v|^2)\n"
	    "	3 -- sigmoid: tanh(gamma*u'*v + coef0)\n"
	    "	4 -- precomputed kernel (kernel values in training_set_file)\n"
	    "-d degree : set degree in kernel function (default 3)\n"
	    "-g gamma : set gamma in kernel function (default 1/num_features)\n"
	    "-r coef0 : set coef0 in kernel function (default 0)\n"
	    "-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n"
	    "-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n"
	    "-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n"
	    "-m cachesize : set cache memory size in MB (default 100)\n"
	    "-e epsilon : set tolerance of termination criterion (default 0.001)\n"
	    "-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)\n"
	    "-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)\n"
	    "-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)\n"
	    "-v n: n-fold cross validation mode\n"
	    "-q : quiet mode (no outputs)\n"
        */
	    //set the parameters to be used for svm
        param.svm_type = 4;	// 4 = nu-SVR
	    param.kernel_type = 1;	// 1 = polynomial
	    param.degree = 2;
	    param.gamma = 0.125;	// fixed here; libsvm's default would be 1/num_features
	    param.coef0 = 0;
	    param.nu = 0.4;
	    param.cache_size = 100;
	    param.C = 0.125;
	    param.eps = 1e-3;
	    param.p = 0.1;
	    param.shrinking = 1;
	    param.probability = 0;
	    param.nr_weight = 0;
	    param.weight_label = NULL;
	    param.weight = NULL;
        //param.v = 10;
        nr_fold =10;

        //read from the data file
	    read_problem_data( input_file_name, flowcol);

        //checking the parameters, if they are set correctly
	    error_msg = svm_check_parameter(&prob,&param);
	    if(error_msg)
	    {
		    cout<<"ERROR: "<<error_msg<<endl<<flush;
		    exit(1);
	    }

        //do_cross_validation();
        // first do grid search do find optimal parameters 
        //paramSelection();  

        
        // then do training with optimal parameters
        //param.gamma = best_gamma;
	    //param.C = best_C;

        cout<< "start training\n"<<endl<<flush; 
        model = svm_train(&prob,&param);
        cout<< "end training\n"<<endl<<flush; 
        
        // then do cross fold validation
        cout<< "start with cross validation" <<endl<<flush; 
	    do_cross_validation();
        cout<< "end cross validation" <<endl<<flush; 
       
        //save model
	    if(svm_save_model(model_file_name,model))
	    {
		    cout<< "can't save model to file "<< model_file_name <<endl<<flush; 
		    exit(1);
	    } 
	
	
        //free all the pointers used, except the model which is required for prediction
//             svm_destroy_param(&param);
// 	    free(prob.y);
// 	    free(prob.x);
// 	    free(x_space);
// 	    free(line);
	    return;
    }
Example #23
void SVModel::initStrutturaDati(int LabelTrSelSize,int *label_training_selection,  // training-set info
                                int FeatSelSize,int *feature_sel,             // which features to use
                                int LabelSelIdx,                                         // which label to use
                                double *f,size_t *FeatSize,double *l,size_t *LabelSize){ // the whole dataset
	int i, ii, j, k, kk;
	int elements, max_index, sc, label_vector_row_num, featsize;
	double *samples, *labels;

	prob.x = NULL;
	prob.y = NULL;
	x_space = NULL;

	labels = l;
	samples = f;
	sc = (int)FeatSize[1];

	elements = 0;
	// the number of training instances
	prob.l = LabelTrSelSize;
    featsize = (int)FeatSize[0];
	label_vector_row_num = (int)LabelSize[0]; 

	if(label_vector_row_num!=featsize)
	{
		fprintf(stderr,"Number of labels != number of instances!\n");
		exit(1);
	}
    int TotIstanze=featsize; 


	if(param.kernel_type == PRECOMPUTED)
		elements = prob.l * (sc + 1);
	else
	{
		for(ii = 0; ii < prob.l; ii++)
		{
            i=label_training_selection[ii]; 
			for(kk = 0; kk < FeatSelSize /*sc*/; kk++){
                k = feature_sel[kk]; 
				if(samples[k * TotIstanze + i] != 0) /* stride over the full dataset, matching the fill loop below */
					elements++;
            }
			// count the '-1' element
			elements++;
		}
	}

    prob.y = new double [prob.l]; 
    prob.x = new struct svm_node* [prob.l]; 
    x_space = new struct svm_node [elements]; 

	max_index = sc;
	j = 0;
	for(ii = 0; ii < prob.l; ii++)
	{
        i=label_training_selection[ii]; 
		prob.x[ii] = &x_space[j];
		prob.y[ii] = labels[LabelSelIdx * TotIstanze + i];

		for(kk = 0; kk < FeatSelSize /*sc*/; kk++)
		{
            k = feature_sel[kk]; 
			if(param.kernel_type == PRECOMPUTED || samples[k * TotIstanze + i] != 0)
			{
				x_space[j].index = k + 1;
				x_space[j].value = samples[k * TotIstanze + i]; 
				j++;
			}
		}
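		// terminate this instance with the sentinel node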
		x_space[j++].index = -1;
	}


// DEBUG-ONLY BLOCK
if (0){
	for(ii = 0; ii < 10; ii++){
        i=label_training_selection[ii]; 
        cout << " " << labels[LabelSelIdx * TotIstanze + i]; 
    }
    cout << endl<< endl;

	for(ii = 0; ii < 10; ii++){
        i=label_training_selection[ii]; 
        cout << " " << i; 
    }
    cout << endl<< endl;
    for(ii = 0; ii < 10; ii++){
        i=label_training_selection[ii]; 
		for(kk = 0; kk < FeatSelSize /*sc*/; kk++){
            k = feature_sel[kk]; 
			if(param.kernel_type == PRECOMPUTED || samples[k * prob.l + i] != 0){
				cout << " " << samples[k * TotIstanze + i]; 
			}
		}
        cout << endl; 
	}
    // exit(1);
}
// END OF DEBUG BLOCK



	if(param.gamma == 0 && max_index > 0)
		param.gamma = 1.0/max_index;

	if(param.kernel_type == PRECOMPUTED)
		for(ii=0; ii<prob.l; ii++)
		{
			if((int)prob.x[ii][0].value <= 0 || (int)prob.x[ii][0].value > max_index)
			{
				fprintf(stderr,"Wrong input format: sample_serial_number out of range\n");
				exit(1);
			}
		}
    const char* error_msg = svm_check_parameter(&prob, &param);

    if(error_msg)
    {
        fprintf(stderr,"Error: %s\n", error_msg);
        exit(1); 
    }
    assegnato=true; 
}
Example #24
Transformation SVMTrain::analyze(const DataSet* dataset) const {
  G_INFO("Doing SVMTrain analysis...");

  QStringList sdescs = selectDescriptors(dataset->layout(), StringType, _descriptorNames, _exclude, false);

  if (!sdescs.isEmpty()) {
    throw GaiaException("SVMTrain: if you want to use string descriptors for training your SVM, "
                        "you first need to enumerate them using the 'enumerate' transform on them. "
                        "String descriptors: ", sdescs.join(", "));
  }

  QStringList descs = selectDescriptors(dataset->layout(), UndefinedType, _descriptorNames, _exclude);
  // sort descriptors in the order in which they are taken inside the libsvm dataset
  sort(descs.begin(), descs.end(), DescCompare(dataset->layout()));
  Region region = dataset->layout().descriptorLocation(descs);

  QStringList classMapping = svm::createClassMapping(dataset, _className);

  // first, convert the training data into an SVM problem structure
  // NB: all checks about fixed-length and type of descriptors are done in this function
  struct svm_problem prob = svm::dataSetToLibsvmProblem(dataset, _className, region, classMapping);

  // also get dimension (this trick works because a vector in there is the number
  // of dimensions + 1 for the sentinel, and we're not in a sparse representation)
  int dimension = prob.x[1] - prob.x[0] - 1;


  // default values
  struct svm_parameter param;
  //param.svm_type = C_SVC;
  //param.kernel_type = RBF;
  //param.degree = 3;
  //param.gamma = 0;	// 1/k
  param.coef0 = 0;
  param.nu = 0.5;
  param.cache_size = 100;
  //param.C = 1;
  param.eps = 1e-3;
  param.p = 0.1;
  param.shrinking = 1;
  //param.probability = 0;
  param.nr_weight = 0;
  param.weight_label = NULL;
  param.weight = NULL;

  // get parameters
  QString svmType = _params.value("type", "C-SVC").toString().toLower();
  param.svm_type = _svmTypeMap.value(svmType);
  QString kernelType = _params.value("kernel", "RBF").toString().toLower();
  param.kernel_type = _kernelTypeMap.value(kernelType);
  param.degree = _params.value("degree", 3).toInt();
  param.C = _params.value("c", 1).toDouble();
  param.gamma = _params.value("gamma", 1.0/dimension).toDouble();
  param.probability = _params.value("probability", false).toBool() ? 1 : 0;


  const char* error_msg = svm_check_parameter(&prob, &param);

  if (error_msg) {
    throw GaiaException(error_msg);
  }


  // do it!
  struct svm_model* model;

  const bool crossValidation = false;
  if (crossValidation) {
    int nr_fold = 10;
    int total_correct = 0;
    double total_error = 0;
    double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
    double* target = new double[prob.l];

    svm_cross_validation(&prob, &param, nr_fold, target);

    if (param.svm_type == EPSILON_SVR ||
        param.svm_type == NU_SVR) {
      for (int i=0; i<prob.l; i++) {
        double y = prob.y[i];
        double v = target[i];
        total_error += (v-y)*(v-y);
        sumv += v;
        sumy += y;
        sumvv += v*v;
        sumyy += y*y;
        sumvy += v*y;
      }
      G_INFO("Cross Validation Mean squared error =" << total_error/prob.l);
      G_INFO("Cross Validation Squared correlation coefficient =" <<
             ((prob.l*sumvy - sumv*sumy) * (prob.l*sumvy - sumv*sumy)) /
             ((prob.l*sumvv - sumv*sumv) * (prob.l*sumyy - sumy*sumy))
             );
    }
    else {
      for (int i=0; i<prob.l; i++)
        if (target[i] == prob.y[i])
          ++total_correct;
      G_INFO("Cross Validation Accuracy =" << 100.0*total_correct/prob.l << "%");
    }
  }
  else { // !crossValidation
    model = svm_train(&prob, &param);
  }

  // save model to a temporary file (only method available from libsvm...),
  // reload it and put it into a gaia2::Parameter
  QTemporaryFile modelFile;
  modelFile.open();
  QString modelFilename = modelFile.fileName();
  modelFile.close();

  if (svm_save_model(modelFilename.toAscii().constData(), model) == -1) {
    throw GaiaException("SVMTrain: error while saving SVM model to temp file");
  }

  modelFile.open();
  QByteArray modelData = modelFile.readAll();
  modelFile.close();


  // if we asked for the model to be output specifically, also do it
  if (_params.value("modelFilename", "").toString() != "") {
    QString filename = _params.value("modelFilename").toString();
    svm_save_model(filename.toAscii().constData(), model);
  }

  // destroy the model allocated by libsvm
  svm_destroy_model(model);

  Transformation result(dataset->layout());
  result.analyzerName = "svmtrain";
  result.analyzerParams = _params;
  result.applierName = "svmpredict";
  result.params.insert("modelData", modelData);
  result.params.insert("className", _params.value("className"));
  result.params.insert("descriptorNames", descs);
  result.params.insert("classMapping", classMapping);
  result.params.insert("probability", (param.probability == 1 && (param.svm_type == C_SVC ||
                                                                  param.svm_type == NU_SVC)));
  return result;
}
Example #25
int
svmTrain (double gamma, std::vector<std::vector<svm_node> > data, std::vector<int> labels)
{
  struct svm_model* model;
  struct svm_problem prob;      
  struct svm_node* x_space;     
  struct svm_parameter param;   

  // Make sure we have data
  if (data.empty ())
  {
    CORE_ERROR ("No training data\n");
    return (-1);  
  }
  
  prob.l = data.size ();
  prob.y = Malloc (double, prob.l);
  prob.x = Malloc (struct svm_node* , prob.l);
  x_space = Malloc (struct svm_node, data.size () * data[0].size ());
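  // assumes all training vectors have the same length as data[0]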
  
  int j = 0;
  for (int i = 0; i < prob.l; ++i)
  {
    prob.y[i] = *(labels.begin () + i);
    prob.x[i] = &x_space[j];
    for (std::vector<svm_node>::iterator it = data[i].begin (); it != data[i].end (); ++it)
    {
      x_space[j].index = it->index;
      x_space[j].value = it->value;
      ++j;
    } 
  }

  param.svm_type = C_SVC;
  param.kernel_type = RBF;
  param.gamma = gamma; 
  param.degree = 3;
  param.coef0 = 0;
  param.nu = 0.5;
  param.cache_size = 100;
  param.C = 1;
  param.eps = 1e-3;
  param.p = 0.1;
  param.shrinking = 1;
  param.probability = 0;
  param.nr_weight = 0;
  param.weight_label = NULL;
  param.weight = NULL;

  // Check whether the parameters are within a feasible range of the problem
  const char* error_msg = svm_check_parameter (&prob, &param);
  if (error_msg)
  {
    CORE_ERROR ("Error checking SVM parameters: %s\n", error_msg);
    return (-1);
  }

  model = svm_train (&prob, &param);
  if (svm_save_model ("model.txt", model))
  {
    CORE_ERROR ("Save SVM model failed\n");
    return (-1);
  }

  svm_free_and_destroy_model (&model);
  svm_destroy_param (&param);
  free (prob.y);
  free (prob.x);
  free (x_space);

  return (0);
}
Example #26
bool LibSVMRunner::save_model_to_config(SVMConfiguration& config,
		svm_parameter* param, svm_problem& problem) {

	const char *error_msg;

	error_msg = svm_check_parameter(&prob, param, config.log);

	if (error_msg) {
		LOG(config.log, LogLevel::ERR_LEVEL, "ERROR: " + to_string(error_msg))
		return false;
	}
	//int* nr = Malloc(int, 1);
	int* nclasses = Malloc(int, 1);

	model = svm_train(&prob, param, config.log);
	//*nr = config.support_vectors.n_rows; //support vectors
	*nclasses = model->nr_class;
	config.nr_class = model->nr_class;
	LOG(config.log, LogLevel::TRACE_LEVEL, "save_model_to_config writing down alphas, nclasses= " + svm_to_str(config.nr_class));

	int nr_support_vectors = model->l;
    //conversion vec->SpCol
    arma::vec alpha_y_tmp = arma::vec(model->sv_coef[0], nr_support_vectors);
    //not my fault. Arma fault :)
	config.alpha_y = arma::zeros(nr_support_vectors);
    for(int i=0;i<nr_support_vectors;++i){
        if(alpha_y_tmp(i) != 0){
            config.alpha_y(i) = alpha_y_tmp(i);
        }
    }

	if(config.nr_class != 2) {
		throw std::invalid_argument( "Code is not implemented for more than 2 classes right now");
	}

	config.b = -model->rho[0];
	config.iter = model->iter;
	// memcpy(config.rho, ,
	// 		config.nr_class * (config.nr_class - 1) / 2 * sizeof(double));

	//config.sv_indices = (int*) malloc(config.l * sizeof(int));
	//svm_get_sv_indices(model, config.sv_indices, config.log);

	int dim = config.getDataDim();
	ASSERT(dim > 0);
	//config.support_vectors = SvmUtils::libtoarma(model->SV, nr_support_vectors, dim);
    //
	LOG(config.log, LogLevel::TRACE_LEVEL, "save_model_to_config writing down SV, n_SV = " + svm_to_str(nr_support_vectors));
	config.support_vectors = SvmUtils::SvmNodeToArmaSpMat(model->SV, nr_support_vectors, dim);
	LOG(config.log, LogLevel::TRACE_LEVEL, "save_model_to_config wrote down SV, n_SV = " + svm_to_str(config.support_vectors.n_cols));
	LOG(config.log, LogLevel::TRACE_LEVEL, "save_model_to_config wrote down SV, dim = " + svm_to_str(config.support_vectors.n_rows));

	//	TODO: WTF!!!!!???
	if (config.svm_type < 2) {
		config.label = (int *) malloc(*nclasses * sizeof(int));
		config.nSV = (int *) malloc(*nclasses * sizeof(int));
		memcpy(config.label, model->label, *nclasses * sizeof(int));
		memcpy(config.nSV, model->nSV, *nclasses * sizeof(int));
	}

    config.neg_target = model->label[1];
    config.pos_target = model->label[0];

	svm_destroy_param(param,config.log);
	svm_free_and_destroy_model(&model,config.log);

	return true;
}
Example #27
/* @return 		    0 -> success (statistics written)
 * 				   -1 -> could not open statistic_filename or invalid SVM parameters
 *
 */
int SVM::crossValidationParameters(char* pSampleFilename, char* pFixedTestFilename, char* pStatisticsFilename)
{
	struct svm_parameter strSvmParameters;
	struct svm_problem strSvmProblem;
	struct svm_model* pstrSvmModel;
	const char *error_msg;
	double dCrossValError = -1;
	double dFixTestSetError = -1;
	double dTrainError = -1;
	ofstream outputStatisticFile;
	const char *pTempModelFilename = "tempSvmModel.tmp";
	double dGamma = -1;
	double dC = -1;
	char tempText[1000];

	//open statistic file
	outputStatisticFile.open(pStatisticsFilename, std::ios::trunc);

	if(!outputStatisticFile.is_open())
	{
		cout << "can not open statistic file " << pStatisticsFilename << std::endl;
		return -1;
	}

	//initial parameters
	this->setParameters(strSvmParameters);

	this->read_problem(pSampleFilename, strSvmProblem, strSvmParameters);
	error_msg = svm_check_parameter(&strSvmProblem, &strSvmParameters);
	if (error_msg)
	{
		cout << "ERROR: " << error_msg << std::endl;
		return -1;
	}

	outputStatisticFile << "\% " << pSampleFilename << std::endl;
	outputStatisticFile << "\% Gamma \t C \t TrainError \t CrossValError \t FixedTestError" << std::endl;
	cout 				<< "\% Gamma \t C \t TrainError \t CrossValError \t FixedTestError" << std::endl;


	for(int iExponentGamma = -25; iExponentGamma <= -2; ++iExponentGamma)
	{
		dGamma = pow(2.0, iExponentGamma);
		strSvmParameters.gamma = dGamma;

		for(int iExponentC = -1; iExponentC <= 15; ++iExponentC)
		{
			dC = pow(2.0, iExponentC);
			strSvmParameters.C = dC;

			//train
			pstrSvmModel = svm_train(&strSvmProblem, &strSvmParameters);
			svm_save_model(pTempModelFilename, pstrSvmModel);
			svm_destroy_model(pstrSvmModel);

			//test with train-set
			this->test(pSampleFilename, pTempModelFilename, dTrainError);

			//crossval
			dCrossValError = this->crossValidationSamples(strSvmProblem, strSvmParameters, 5);

			//test with fixed test-set
			this->test(pFixedTestFilename, pTempModelFilename, dFixTestSetError);

			remove(pTempModelFilename);

			sprintf(tempText, "%.15lf\t%10lf\t%10lf\t%10lf\t%10lf", dGamma, dC, dTrainError, dCrossValError, dFixTestSetError);

			outputStatisticFile << tempText << std::endl;
			cout 				<< "\n#######################################################################" << std::endl;
			cout				<< tempText << std::endl;
			cout 				<< "#######################################################################\n" << std::endl;
		}
	}

	//clean up
	outputStatisticFile.close();
	svm_destroy_param(&strSvmParameters);
	free(strSvmProblem.y);
	free(strSvmProblem.x);
	//free(pTempModelFilename);

	return 0;
}
Example #28
int main(int argc, char **argv)
{
	#ifdef WIN32
		// Send all reports to STDOUT
		_CrtSetReportMode( _CRT_WARN, _CRTDBG_MODE_FILE );
		_CrtSetReportFile( _CRT_WARN, _CRTDBG_FILE_STDOUT );
		_CrtSetReportMode( _CRT_ERROR, _CRTDBG_MODE_FILE );
		_CrtSetReportFile( _CRT_ERROR, _CRTDBG_FILE_STDOUT );
		_CrtSetReportMode( _CRT_ASSERT, _CRTDBG_MODE_FILE );
		_CrtSetReportFile( _CRT_ASSERT, _CRTDBG_FILE_STDOUT );

		// enable the options
		SET_CRT_DEBUG_FIELD( _CRTDBG_DELAY_FREE_MEM_DF );
		SET_CRT_DEBUG_FIELD( _CRTDBG_LEAK_CHECK_DF );
	#endif
		
	printf("int %d, short int %d, char %d, double %d, float %d, node %d\n",sizeof(int),sizeof(short int), sizeof(char), sizeof(double), sizeof(float), sizeof(svm_node));

	char input_file_name[FILENAME_LEN];    
	char model_file_name[FILENAME_LEN];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
    read_problem(input_file_name);
	param.modelFile = model_file_name;

	printf ("Finish reading input files!\n");

	error_msg = svm_check_parameter(&prob,&param);	

	#ifdef WIN32
		assert(_CrtCheckMemory());
	#endif

	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		exit(1);
	}

    double duration = 0;	// stays 0 when only cross-validation runs
	double start = getRunTime();
	if(cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		printf("kernel: %d\n",param.kernel_type);
		model = svm_train(&prob,&param);
        double finish = getRunTime();	
        duration = (double)(finish - start);

    #ifdef WIN32
		assert(_CrtCheckMemory());
	#endif

		svm_save_model(model_file_name,model);
		svm_destroy_model(model);
	}
	
	printf("CPU Time = %f second\n", duration);
    FILE* fModel = fopen(model_file_name, "a+t");					// append mode
	fprintf(fModel, "CPU Time = %f second\n", duration);
	fclose(fModel);
	    
    svm_destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);

	#ifdef WIN32
		assert(_CrtCheckMemory());
	#endif

    return 0;
}
Example #29
// Interface function of matlab
// now assume prhs[0]: label prhs[1]: features
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	const char *error_msg;

	// fix random seed to have same results for each run
	// (for cross validation and probability estimation)
	srand(1);

	if(nlhs > 1)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	// Transform the input Matrix to libsvm format
	if(nrhs > 1 && nrhs < 4)
	{
		int err;

		if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) {
			mexPrintf("Error: label vector and instance matrix must be double\n");
			fake_answer(nlhs, plhs);
			return;
		}

		if(parse_command_line(nrhs, prhs, NULL))
		{
			exit_with_help();
			svm_destroy_param(&param);
			fake_answer(nlhs, plhs);
			return;
		}

		if(mxIsSparse(prhs[1]))
		{
			if(param.kernel_type == PRECOMPUTED)
			{
				// precomputed kernel requires dense matrix, so we make one
				mxArray *rhs[1], *lhs[1];

				rhs[0] = mxDuplicateArray(prhs[1]);
				if(mexCallMATLAB(1, lhs, 1, rhs, "full"))
				{
					mexPrintf("Error: cannot generate a full training instance matrix\n");
					svm_destroy_param(&param);
					fake_answer(nlhs, plhs);
					return;
				}
				err = read_problem_dense(prhs[0], lhs[0]);
				mxDestroyArray(lhs[0]);
				mxDestroyArray(rhs[0]);
			}
			else
				err = read_problem_sparse(prhs[0], prhs[1]);
		}
		else
			err = read_problem_dense(prhs[0], prhs[1]);

		// svmtrain's original code
		error_msg = svm_check_parameter(&prob, &param);

		if(err || error_msg)
		{
			if (error_msg != NULL)
				mexPrintf("Error: %s\n", error_msg);
			svm_destroy_param(&param);
			free(prob.y);
			free(prob.x);
			free(x_space);
			fake_answer(nlhs, plhs);
			return;
		}

		if(cross_validation)
		{
			double *ptr;
			plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
			ptr = mxGetPr(plhs[0]);
			ptr[0] = do_cross_validation();
		}
		else
		{
			int nr_feat = (int)mxGetN(prhs[1]);
			const char *error_msg;
			model = svm_train(&prob, &param);
			error_msg = model_to_matlab_structure(plhs, nr_feat, model);
			if(error_msg)
				mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg);
			svm_free_and_destroy_model(&model);
		}
		svm_destroy_param(&param);
		free(prob.y);
		free(prob.x);
		free(x_space);
	}
	else
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}
}
Example #30
int main(int argc, char *argv[]) {

    float labels[4] = {1.0, 1.0, -1.0, -1.0};
    cv::Mat labelsMat(4, 1, CV_32FC1, labels);
    float trainingData[4][2] = {{501, 10}, {255, 10},
                                {501, 255}, {10, 501}};
    cv::Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
    
    svm_parameter param;
    param.svm_type = C_SVC;
    param.kernel_type = LINEAR;
    param.degree = 3;
    param.gamma = 0;
    param.coef0 = 0;
    param.nu = 0.5;
    param.cache_size = 100;
    param.C = 1;
    param.eps = 1e-6;
    param.p = 0.1;
    param.shrinking = 1;
    param.probability = 1;
    param.nr_weight = 0;
    param.weight_label = NULL;
    param.weight = NULL;
    
    svm_problem svm_prob_vector = libSVMWrapper(
       trainingDataMat, labelsMat, param);
    struct svm_model *model = NULL;  // avoid leaking a dummy svm_model
    const char *error_msg = svm_check_parameter(&svm_prob_vector, &param);
    if (error_msg) {
       std::cout << "ERROR: " << error_msg << std::endl;
    } else {
       model = svm_train(&svm_prob_vector, &param);
    }

    bool is_compute_probability = true;
    std::string model_file_name = "svm";
    bool save_model = true;
    if (save_model) {
       try {
          svm_save_model(model_file_name.c_str(), model);
          std::cout << "Model file Saved Successfully..." << std::endl;
       } catch(std::exception& e) {
          std::cout << e.what() << std::endl;
       }
    }


    bool is_probability_model = svm_check_probability_model(model);
    int svm_type = svm_get_svm_type(model);
    int nr_class = svm_get_nr_class(model);  // number of classes
    double *prob_estimates = new double[nr_class];
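    // filled by svm_predict_probability with one estimate per class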

    cv::Vec3b green(0, 255, 0);
    cv::Vec3b blue(255, 0, 0);
    int width = 512, height = 512;
    cv::Mat image = cv::Mat::zeros(height, width, CV_8UC3);
    for (int i = 0; i < image.rows; ++i) {
       for (int j = 0; j < image.cols; ++j) {
          cv::Mat sampleMat = (cv::Mat_<float>(1, 2) << j, i);
              
          int dims = sampleMat.cols;
          svm_node* test_pt = new svm_node[dims + 1];  // +1 for the index = -1 sentinel
          for (int k = 0; k < dims; k++) {
             test_pt[k].index = k + 1;
             test_pt[k].value = static_cast<double>(sampleMat.at<float>(0, k));
          }
          test_pt[dims].index = -1;

          float response = 0.0f;
          if (is_probability_model && is_compute_probability) {
             response = svm_predict_probability(model, test_pt, prob_estimates);
          } else {
             response = svm_predict(model, test_pt);
          }
          delete[] test_pt;  // release the per-pixel feature vector
          
          /*
          std::cout << "Predict: " << prob << std::endl;
          for (int y = 0; y < nr_class; y++) {
             std::cout << prob_estimates[y] << "  ";
          }std::cout <<  std::endl;
          */
          
          if (prob_estimates[0] > 0.5 || response == 1) {
             image.at<cv::Vec3b>(i, j)  = green;
          } else if (prob_estimates[1] >= 0.5 || response == -1) {
             image.at<cv::Vec3b>(i, j)  = blue;
          }
       }
    }
    cv::imshow("image", image);
    cv::waitKey(0);
    return 0;
}