/**
                     * Allows setting the call template; throws if the
                     * template parameters are not found.
                     * @param call_templ the call template
                     */
                    inline void set_call_template(string & call_templ) {
                        //Store the template
                        m_call_templ = call_templ;

                        //Check the presence of the parameters
                        check_parameter(WORK_DIR_TEMPL_PARAM_NAME);
                        check_parameter(JOB_UID_TEMPL_PARAM_NAME);
                        check_parameter(LANGUAGE_TEMPL_PARAM_NAME);
                    }
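A minimal usage sketch for the setter above (hypothetical: the trans_config object, the placeholder syntax in the template string, and the exact exception type are assumptions, since the surrounding class is not shown in this excerpt):

try {
    // hypothetical template text containing the three expected placeholders
    string call_templ = "<WORK_DIR>/run_<JOB_UID>.<LANGUAGE>.sh";
    trans_config.set_call_template(call_templ);
} catch (std::exception & ex) {
    cerr << "Invalid call template: " << ex.what() << endl;
}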
Example #2
File: train.cpp Project: cental/stc
int train_fs(const char* input_file_name, const char* model_file_name){
	// Initialization
	const char* error_msg;
	set_default_params();
	read_problem(input_file_name);
	error_msg = check_parameter(&prob,&param);
	if(error_msg){
		fprintf(stderr,"Error: %s\n",error_msg);
		return -1;
	}

	// Do the cross-validation and save accuracy
	double accuracy = do_cross_validation(nr_fold);
	std::string info_fpath = std::string(model_file_name) + ".info";
	FILE* info = fopen(info_fpath.c_str(), "w");
	if(info == NULL){
		fprintf(stderr,"can't open accuracy file %s\n",info_fpath.c_str());
		return -1;
	}
	fprintf(info, "Accuracy : %f\n", accuracy);
	fclose(info);

	// Train a model on the whole dataset
	model_train=train(&prob, &param);
	if(save_model(model_file_name, model_train)){
		fprintf(stderr,"can't save model to file %s\n",model_file_name);
		return -1;
	}

	// Free resources
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);
	
	return 0;
}
Example #3
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		exit(1);
	}

	if(flag_cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		model_=train(&prob, &param);
		save_model(model_file_name, model_);
		destroy_model(model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}
// Training SVM with feature vector X and label Y. 
// Each row of X is a feature vector, with corresponding label in Y.
// Return a CV_32F weight Mat
Mat Objectness::trainSVM(CMat &X1f, const vecI &Y, int sT, double C, double bias, double eps)
{
	// Set SVM parameters
	parameter param; {
		param.solver_type = sT; // L2R_L2LOSS_SVC_DUAL;
		param.C = C;
		param.eps = eps; // see setting below
		param.p = 0.1;
		param.nr_weight = 0;
		param.weight_label = NULL;
		param.weight = NULL;
		set_print_string_function(print_null);
		CV_Assert(X1f.rows == Y.size() && X1f.type() == CV_32F);
	}

	// Initialize a problem
	feature_node *x_space = NULL;
	problem prob;{
		prob.l = X1f.rows;
		prob.bias = bias;
		prob.y = Malloc(double, prob.l);
		prob.x = Malloc(feature_node*, prob.l);
		const int DIM_FEA = X1f.cols;
		prob.n = DIM_FEA + (bias >= 0 ? 1 : 0);
		x_space = Malloc(feature_node, (prob.n + 1) * prob.l);
		int j = 0;
		for (int i = 0; i < prob.l; i++){
			prob.y[i] = Y[i];
			prob.x[i] = &x_space[j];
			const float* xData = X1f.ptr<float>(i);
			for (int k = 0; k < DIM_FEA; k++){
				x_space[j].index = k + 1;
				x_space[j++].value = xData[k];
			}
			if (bias >= 0){
				x_space[j].index = prob.n;
				x_space[j++].value = bias;
			}
			x_space[j++].index = -1;
		}
		CV_Assert(j == (prob.n + 1) * prob.l);
	}

	// Training SVM for current problem
	const char*  error_msg = check_parameter(&prob, &param);
	if(error_msg){
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}
	model *svmModel = train(&prob, &param);
	Mat wMat(1, prob.n, CV_64F, svmModel->w);
	wMat.convertTo(wMat, CV_32F);
	free_and_destroy_model(&svmModel);
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	return wMat;
}
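The snippet below is a hypothetical usage sketch for trainSVM, not code from the original project; it assumes vecI is a std::vector<int> alias as used elsewhere in this code base, that an Objectness instance is available, and that the OpenCV and liblinear headers are included:

Mat demoTrainSVM(Objectness &obj)
{
	// Build a random two-class toy problem: 100 feature vectors of dimension 64.
	Mat X1f(100, 64, CV_32F);
	randu(X1f, Scalar::all(-1), Scalar::all(1));
	vecI Y(100);
	for (int i = 0; i < 100; i++)
		Y[i] = (i < 50) ? 1 : -1;
	// L2R_L2LOSS_SVC_DUAL is the liblinear solver constant mentioned in the comment above.
	Mat w = obj.trainSVM(X1f, Y, L2R_L2LOSS_SVC_DUAL, 10.0, 1.0, 0.01);
	// w is a 1 x 65 CV_32F row vector: 64 feature weights plus one bias weight.
	return w;
}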
void LVlinear_train(lvError *lvErr, const LVlinear_problem *prob_in, const LVlinear_parameter *param_in, LVlinear_model * model_out){
	try{
		// Input verification: Problem dimensions
		if ((*(prob_in->x))->dimSize != (*(prob_in->y))->dimSize)
			throw LVException(__FILE__, __LINE__, "The problem must have an equal number of labels and feature vectors (x and y).");

		//-- Convert problem
		std::unique_ptr<problem> prob(new problem);
		uint32_t nr_nodes = (*(prob_in->y))->dimSize;
		prob->l = nr_nodes;
		prob->y = (*(prob_in->y))->elt;

		// Create an array of pointers (sparse data structure)
		std::unique_ptr<feature_node*[]> x(new feature_node*[nr_nodes]);
		prob->x = x.get();

		auto x_in = prob_in->x;
		for (unsigned int i = 0; i < (*x_in)->dimSize; i++){
			// Assign the innermost svm_node array pointers to the array of pointers
			auto xi_in_Hdl = (*x_in)->elt[i];
			x[i] = reinterpret_cast<feature_node*>((*xi_in_Hdl)->elt);
		}

		//-- Convert parameters
		std::unique_ptr<parameter> param(new parameter());
		LVConvertParameter(param_in, param.get());

		// Verify parameters
		const char * param_check = check_parameter(prob.get(), param.get());
		if (param_check != nullptr)
			throw LVException(__FILE__, __LINE__, "Parameter check failed with the following error: " + std::string(param_check));

		// Train model
		model *result = train(prob.get(), param.get());

		// Copy model to LabVIEW memory
		LVConvertModel(result, model_out);

		// Release memory allocated by train
		free_model_content(result);
	}
	catch (LVException &ex) {
		ex.returnError(lvErr);
		// To avoid LabVIEW reading and utilizing bad memory, the dimension sizes of the arrays are set to zero
		(*(model_out->label))->dimSize = 0;
		(*(model_out->w))->dimSize = 0;
	}
	catch (std::exception &ex) {
		LVException::returnStdException(lvErr, __FILE__, __LINE__, ex);
		(*(model_out->label))->dimSize = 0;
		(*(model_out->w))->dimSize = 0;
	}
	catch (...) {
		LVException ex(__FILE__, __LINE__, "Unknown exception has occurred");
		ex.returnError(lvErr);
		(*(model_out->label))->dimSize = 0;
		(*(model_out->w))->dimSize = 0;
	}
}
Example #6
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}
	if( flag_find_C && flag_warm_start)
	{
		fprintf(stderr,"ERROR: Option -C and -i can't both exist\n");
		exit(1);
	}
	if (flag_find_C)
	{
		do_find_parameter_C();
	}
	else if(flag_cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		if(flag_warm_start)
		{
			if(prob.n != initial_model->nr_feature)
				fprintf(stderr,"WARNING: The number of features in the input file does not match that in the initial model\n");
			model_=warm_start_train(&prob, &param, initial_model);
			free_and_destroy_model(&initial_model);
		}
		else
			model_=train(&prob, &param);
		if(save_model(model_file_name, model_))
		{
			fprintf(stderr,"can't save model to file %s\n",model_file_name);
			exit(1);
		}
		free_and_destroy_model(&model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}
Example #7
void rt_task4_thread_entry(void* parameter)
{
	extern void start_transmit(void);
 	extern void stop_transmit(void);
	extern uint8_t check_parameter(void);
//	extern uint8_t self_check(void);
//	extern void cpu_usage_init();
	rt_uint32_t ev;
	
	if((GPIO_ReadInputDataBit(GPIOD, GPIO_Pin_3) == 0))
	{
		coil.usRegCoilBuf |= M(0);
	}
	else
	{
		coil.usRegCoilBuf &= ~M(0);
	}
	
	rt_event_init(&key_event, "key_event", RT_IPC_FLAG_FIFO );
	
	while(1)
	{
		if( rt_event_recv( &key_event, FOOT_PUPD | KEY_PUPD, RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_WAITING_FOREVER, &ev ) == RT_EOK ) 
		{
			
				rt_thread_delay(6);
				if( (ev & FOOT_PUPD) && (coil.usRegCoilBuf & M(0)) &&(GPIO_ReadInputDataBit(GPIOA, GPIO_Pin_0) == 0)&& \
					(check_parameter()) && (coil.usRegCoilBuf & M(7)))
				{
					start_transmit();
				}
				else
				{
					stop_transmit();
				}
				
				if(ev & KEY_PUPD)
				{
					if((GPIO_ReadInputDataBit(GPIOD, GPIO_Pin_3) == 0))
					{
						coil.usRegCoilBuf |= M(0);
					}
					else
					{
						coil.usRegCoilBuf &= ~M(0);
					}
				}
		}
		rt_event_recv( &key_event, FOOT_PUPD | KEY_PUPD, RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, 2, &ev );
		
		rt_thread_delay(20);
	}
}
void LVlinear_cross_validation(lvError *lvErr, const LVlinear_problem *prob_in, const LVlinear_parameter *param_in, const int32_t nr_fold, LVArray_Hdl<double> target_out){
	try{
		// Input verification: Problem dimensions
		if ((*(prob_in->x))->dimSize != (*(prob_in->y))->dimSize)
			throw LVException(__FILE__, __LINE__, "The problem must have an equal number of labels and feature vectors (x and y).");

		// Convert LVsvm_problem to svm_problem
		std::unique_ptr<problem> prob(new problem);
		uint32_t nr_nodes = (*(prob_in->y))->dimSize;
		prob->l = nr_nodes;
		prob->y = (*(prob_in->y))->elt;

		// Create an array of pointers (sparse data structure)
		std::unique_ptr<feature_node*[]> x(new feature_node*[nr_nodes]);
		prob->x = x.get();

		auto x_in = prob_in->x;
		for (unsigned int i = 0; i < (*x_in)->dimSize; i++){
			// Assign the innermost svm_node array pointers to the array of pointers
			auto xi_in_Hdl = (*x_in)->elt[i];
			x[i] = reinterpret_cast<feature_node*>((*xi_in_Hdl)->elt);
		}

		// Assign parameters to svm_parameter
		std::unique_ptr<parameter> param(new parameter());
		LVConvertParameter(param_in, param.get());

		// Verify parameters
		const char * param_check = check_parameter(prob.get(), param.get());
		if (param_check != nullptr)
			throw LVException(__FILE__, __LINE__, "Parameter check failed with the following error: " + std::string(param_check));

		// Allocate room in target_out
		LVResizeNumericArrayHandle(target_out, nr_nodes);

		// Run cross validation
		cross_validation(prob.get(), param.get(), nr_fold, (*target_out)->elt);
		(*target_out)->dimSize = nr_nodes;
	}
	catch (LVException &ex) {
		ex.returnError(lvErr);
		(*target_out)->dimSize = 0;
	}
	catch (std::exception &ex) {
		LVException::returnStdException(lvErr, __FILE__, __LINE__, ex);
		(*target_out)->dimSize = 0;
	}
	catch (...) {
		LVException ex(__FILE__, __LINE__, "Unknown exception has occurred");
		ex.returnError(lvErr);
		(*target_out)->dimSize = 0;
	}
}
Example #9
void LibLinearWrapper::trainModel(Mat labels, Mat trainingData, vector<float>& weight)
{
    CV_Assert(labels.rows == trainingData.rows);
    m_prob.l = labels.rows;
    m_prob.n = trainingData.cols;

    auto num_samples = trainingData.total();
    auto elements = num_samples + m_prob.l * 2;

    vector<double> y(m_prob.l);
    vector<struct feature_node*> x(m_prob.l);
    vector<struct feature_node> x_spaceVec (elements);

    m_prob.y = y.data();
    m_prob.x = x.data();
    auto x_space = x_spaceVec.data();

    m_prob.bias = -1;

    int j = 0;
    for (int i = 0; i<m_prob.l; ++i)
    {
        m_prob.x[i] = &x_space[j];
        m_prob.y[i] = labels.at<float>(i);

        auto row_ptr = trainingData.ptr<float>(i);
        for (int k = 0; k < trainingData.cols; ++k)
        {
            if (row_ptr[k])
            {
                x_space[j].index = k + 1;
                x_space[j].value = row_ptr[k];
                ++j;
            }
        }
        x_space[j++].index = -1;
    }

    auto error_msg = check_parameter(&m_prob, &m_param);
    if (error_msg)
    {
        std::clog << "Error: " << error_msg << std::endl;
        return;
    }
    struct model *model_ = train(&m_prob, &m_param);
    double norm = std::inner_product(model_->w, model_->w + model_->nr_feature, model_->w, 0.0);
    norm = sqrt(norm);

    weight.resize(model_->nr_feature);
    transform(model_->w, model_->w + model_->nr_feature, weight.begin(), [norm](double val) {
        return val / norm;
    });
}
Example #10
int count_lines_parameters(FILE *fp) {
    int    n_lines = 0;
    char * line    = NULL;
    size_t n       = 0;
    int    r;
    while(!feof(fp)) {
        r = getline(&line, &n, fp);
        if(r > 0 && !check_parameter(line))
            n_lines++;
    }
    SID_free(SID_FARG line);
    rewind(fp);
    return (n_lines);
}
Example #11
File: train.c Project: Joelone/MLEA
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		exit(1);
	}

	if(flag_cross_validation)
	{
		if (nr_fold <= 10)
		{
			do_cross_validation();
		}
		else
		{
			double cv;
			nr_fold = nr_fold - 10;
			cv =  binary_class_cross_validation(&prob, &param, nr_fold);
			printf("Cross Validation = %g%%\n",100.0*cv);
		}
	}
	else
	{
		model_=train(&prob, &param);
		if(save_model(model_file_name, model_))
		{
			fprintf(stderr,"can't save model to file %s\n",model_file_name);
			exit(1);
		}
		free_and_destroy_model(&model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(prob.W);
	free(x_space);
	free(line);

	return 0;
}
Example #12
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	param.train_file = Malloc(char,1024);
	strcpy(param.train_file, input_file_name);
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}

	if(flag_cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		clock_t start_cpu, end_cpu;
		double cpu_time_used;
		start_cpu = clock();
		model_=train(&prob, &param);
		end_cpu = clock();
		cpu_time_used = ((double) (end_cpu - start_cpu)) / CLOCKS_PER_SEC;
		if(save_model(model_file_name, model_))
		{
			fprintf(stderr,"can't save model to file %s\n",model_file_name);
			exit(1);
		}
		free_and_destroy_model(&model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}
Example #13
	struct model* main(int argc, char **argv)
	{
		char input_file_name[1024];
		char model_file_name[1024];
		const char *error_msg;

		parse_command_line(argc, argv, input_file_name, model_file_name);
		auto prob = read_problem(input_file_name);
		error_msg = check_parameter(&prob, &param);

		if (error_msg)
		{
			fprintf(stderr, "ERROR: %s\n", error_msg);
			exit(1);
		}

		struct model *pmodel = NULL;

		if (flag_find_C)
		{
			do_find_parameter_C(&prob);
		}
		else if (flag_cross_validation)
		{
			do_cross_validation(&prob);
		}
		else
		{
			pmodel = train(&prob, &param);
			/*if (save_model(model_file_name, pmodel))
			{
				fprintf(stderr, "can't save model to file %s\n", model_file_name);
				exit(1);
			}
			free_and_destroy_model(&pmodel);*/
		}
		destroy_param(&param);
		free(prob.y);
		free(prob.x);
		free(x_space);
		free(line);

		return pmodel;
	}
Example #14
// Interface function of matlab
// now assume prhs[0]: label prhs[1]: features
int main(int argc, char **argv)
{
	const char *error_msg;
	// fix random seed to have same results for each run
	// (for cross validation)
	srand(1);

	char input_file_name[1024];
	char model_file_name[1024];

   
	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = check_parameter(&prob,&param);
	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		destroy_param(&param);
		free(prob.y);
		free(prob.x);
		free(x_space);
		exit(1);
	}

	if(cross_validation_flag)
	{
		do_cross_validation();
	}
	else
	{
		model_=FGM_train(&prob, &param);
		printf("training is done!\n");
		save_model_poly(model_file_name, model_);
		printf("model is saved!\n");
		destroy_model(model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);

}
Example #15
int main(int argc, char *argv[])
{
    int fd = 0;
    int ret = 0;
    struct aiocb my_aiocb = {0};

    check_parameter(argc);

    fd = open(argv[argc-1], O_RDONLY);
    if (fd < 0)
    {
        fprintf(stderr, "open failure\n");
        return EXIT_FAILURE;
    }

    my_aiocb.aio_buf    = malloc(BUFSIZ+1); 
    my_aiocb.aio_fildes = fd;
    my_aiocb.aio_nbytes = BUFSIZ;
    my_aiocb.aio_offset = 0;
    
    ret = aio_read(&my_aiocb);
    if (ret < 0)
    {
        fprintf(stderr, "aio_read failure\n");
        return EXIT_FAILURE;
    }

    while (aio_error(&my_aiocb) == EINPROGRESS);

    if ((ret = aio_return(&my_aiocb)) > 0)
    {
        printf("%s", my_aiocb.aio_buf);
    }
    else
    {
        fprintf(stderr, "aio_return failure\n");
    }

    return EXIT_SUCCESS;
}
Example #16
File: train.cpp Project: fgtlss/sol
//void copy_parameter(param_temp,&param)
//{
//	param_t
//}
// Interface function of matlab
// now assume prhs[0]: label prhs[1]: features
int main(int argc, char **argv)
{
	const char *error_msg;
	// fix random seed to have same results for each run
	// (for cross validation)
	srand(1);

	char input_file_name[1024];
	char model_file_name[1024];
	char param_file_name[1024];

    
	int n_flag = 0;
	parse_command_line(argc, argv, input_file_name, model_file_name,n_flag,param_file_name);
	char char_para;
	int length_param = 0;
	double *para_entry; // 
	//n_flag = 1;
	//int para_B[20] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 12,14,16, 18, 20, 25, 30,35,40,45,50};
	int para_B[40] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 24, 25, 26, 30,32, 35, 38, 40, 42, 45, 48, 50, 55, 60, 65,70,75,80, 85, 90, 95, 100, 105, 110, 115, 120};
	//int para_B[32] = { 20, 24, 25, 26, 30,32, 35, 38, 40, 42, 45, 48, 50, 55,  60, 80, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280};

	if (n_flag==1)
	{
		read_parameter(param_file_name, para_entry, char_para, length_param);
	}

	read_problem(input_file_name);
	


	error_msg = check_parameter(&prob,&param);
//parameter *param_temp = new parameter[1];
//copy_parameter(param_temp,&param);
	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		exit(1);
	}

	if(cross_validation_flag)
	{
		do_cross_validation();
	}
	else
	{
		if(n_flag==0)
		{
			model_ = FGM_train(&prob, &param);
			printf("training is done!\n");
			save_model_poly(model_file_name, model_);
			printf("model is saved!\n");
			destroy_model(model_);
		}
		else
		{
			int i;
			if (char_para=='C')
			{
				// keep length_param as read from the parameter file
			}
			else
			{
				length_param = 40;
			}
			for (i=0;i<length_param;i++)
			{
				char param_char[1000];
			    char model_file[1024];
				strcpy(model_file,model_file_name);
				if (char_para=='C')
				{
					param.C = para_entry[i];
					sprintf(param_char, "%.10lf ", para_entry[i]); 
					strcat(model_file,".c.");
					strcat(model_file,param_char);
				    model_=FGM_train(&prob, &param);
				}
				else
				{
					int B = para_B[i];
					param.B = B;
					sprintf(param_char, "%d ", param.B); 
					printf("%d\n ", param.B); 
					strcat(model_file,".B.");
					strcat(model_file,param_char);
				    model_=FGM_train(&prob, &param);
				}
				
				printf("training is done!\n");
				save_model_poly(model_file, model_);
				printf("model is saved!\n");
				destroy_model(model_);
				if(model_->feature_pair>600)
				{
					break;
				}
			}
		}

	}
	if (n_flag==1)
	{
		delete []para_entry;
	}
	
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);

}
Example #17
// Interface function of matlab
// now assume prhs[0]: label prhs[1]: features
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	const char *error_msg;
	// fix random seed to have same results for each run
	// (for cross validation)
	srand(1);

	// Transform the input Matrix to libsvm format
	if(nrhs > 0 && nrhs < 5)
	{
		int err=0;

		if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) {
			mexPrintf("Error: label vector and instance matrix must be double\n");
			fake_answer(plhs);
			return;
		}

		if(parse_command_line(nrhs, prhs, NULL))
		{
			exit_with_help();
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}

		if(mxIsSparse(prhs[1]))
			err = read_problem_sparse(prhs[0], prhs[1]);
		else
		{
			mexPrintf("Training_instance_matrix must be sparse\n");
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}

		// train's original code
		error_msg = check_parameter(&prob, &param);

		if(err || error_msg)
		{
			if (error_msg != NULL)
				mexPrintf("Error: %s\n", error_msg);
			destroy_param(&param);
			free(prob.y);
			free(prob.x);
			free(x_space);
			fake_answer(plhs);
			return;
		}

		if(cross_validation_flag)
		{
			double *ptr;
			plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
			ptr = mxGetPr(plhs[0]);
			ptr[0] = do_cross_validation();
		}
		else
		{
			const char *error_msg;

			model_ = train(&prob, &param);
			error_msg = model_to_matlab_structure(plhs, model_);
			if(error_msg)
				mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg);
			destroy_model(model_);
		}
		destroy_param(&param);
		free(prob.y);
		free(prob.x);
		free(x_space);
	}
	else
	{
		exit_with_help();
		fake_answer(plhs);
		return;
	}
}
Example #18
int main(int argc, char **argv)
{
#ifdef GPU
    int dev = findCudaDevice(argc, (const char **) argv);
    if (dev == -1)
        return 0;

    if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS)
    {
        fprintf(stdout, "CUBLAS initialization failed!\n");
        cudaDeviceReset();
        exit(EXIT_FAILURE);
    }
#endif // GPU

	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	clock_t t1 = clock();
	read_problem(input_file_name);
	clock_t t2 = clock();
	printf("reading the input file took %f seconds.\n", float(t2-t1)/CLOCKS_PER_SEC);
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}

	if(flag_cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		model_=train(&prob, &param);
		if(save_model(model_file_name, model_))
		{
			fprintf(stderr,"can't save model to file %s\n",model_file_name);
			exit(1);
		}
		free_and_destroy_model(&model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);


#ifdef GPU
    cublasDestroy(handle);
    cudaDeviceReset();
#endif // GPU
  printf("reading the input file took %f seconds.\n", float(t2-t1)/CLOCKS_PER_SEC);

	return 0;
}
Example #19
cv::Mat_<double> cRegression::__train_regressor(const cv::Mat_<double>& label_vec, const cv::Mat_<int>& instance_mat)
{
	void(*print_func)(const char*) = &print_null;
	const char *error_msg;

	struct parameter param;
	struct problem   problem;
	struct feature_node *x_space = NULL;

	srand(1);
	// std::cout << "initialize liblinear parameter." << std::endl;
	param.solver_type = L2R_L2LOSS_SVR_DUAL;
	param.C = 1.0 / (double)label_vec.rows;
	param.eps = 0.1;
	param.p = 0;
	param.nr_weight = 0;
	param.weight_label = NULL;
	param.weight = NULL;
	// std::cout << "initialize liblinear parameter finished." << std::endl;
	set_print_string_function(print_func);

	std::vector<int>*  prob_x = NULL;
	prob_x = new std::vector<int>[label_vec.rows]; // number of samples = label_vec.rows

	size_t nzcount = 0;
	// std::cout << "copy feature." << std::endl;
	for (int i = 0; i < instance_mat.rows; ++i) {
	    for (int j = 0; j < instance_mat.cols; ++j) {
		    int elem = instance_mat(i, j);
			if (elem != 0) {
				prob_x[i].push_back(j);
				++nzcount;
			}
		}
	}
	// std::cout << "copy feature finished." << std::endl;

	//sort the vector
	for (int i = 0; i < label_vec.rows; i++){
		std::sort(prob_x[i].begin(), prob_x[i].end());
	}

	problem.l = label_vec.rows;
	problem.n = instance_mat.cols;
	problem.bias = -1;

	int elements = (int)(nzcount + problem.l);

	problem.y = Malloc(double, problem.l);
	problem.x = Malloc(struct feature_node *, problem.l);
	x_space = Malloc(struct feature_node, elements);

	int j = 0;
	for (int i = 0; i < problem.l; i++){
		problem.y[i] = label_vec(i, 0);
		problem.x[i] = &x_space[j];

		for (int k = 0; k < prob_x[i].size(); k++){
			x_space[j].index = prob_x[i][k] + 1;
			x_space[j].value = 1;
			j++;
		}
		x_space[j++].index = -1;
	}

	delete[] prob_x;

	error_msg = check_parameter(&problem, &param);
	if (error_msg){
		fprintf(stderr, "ERROR: %s\n", error_msg);
	}

	// std::cout << "train model." << std::endl;
	struct model *model = NULL;
	model = train(&problem, &param);
	// std::cout << "train model finished." << std::endl;

	cv::Mat_<double> weight = cv::Mat::zeros(model->nr_feature, 1, CV_64FC1);
	for (int i = 0; i < model->nr_feature; i++){
		weight(i, 0) = model->w[i];
		// std::cout << weight(i, 0) << " "; // std::endl;
	}


	free_and_destroy_model(&model);
	destroy_param(&param);

	free((void*)(problem.y));
	free((void*)(problem.x));
	free((void*)(x_space));
	return weight;
}
Example #20
bool lt(ast * n1, ast * n2) {
    unsigned num;
 start:
    if (n1 == n2)
        return false;
    check_value(n1->get_kind(), n2->get_kind());
    switch(n1->get_kind()) {
    case AST_SORT:
        check_symbol(to_sort(n1)->get_name(), to_sort(n2)->get_name());
        check_value(to_sort(n1)->get_num_parameters(), to_sort(n2)->get_num_parameters());
        num = to_sort(n1)->get_num_parameters();
        SASSERT(num > 0);
        for (unsigned i = 0; i < num; i++) {
            parameter p1 = to_sort(n1)->get_parameter(i);
            parameter p2 = to_sort(n2)->get_parameter(i);
            check_parameter(p1, p2);
        }
        UNREACHABLE();
        return false;
    case AST_FUNC_DECL:
        check_symbol(to_func_decl(n1)->get_name(), to_func_decl(n2)->get_name());
        check_value(to_func_decl(n1)->get_arity(), to_func_decl(n2)->get_arity());
        check_value(to_func_decl(n1)->get_num_parameters(), to_func_decl(n2)->get_num_parameters());
        num = to_func_decl(n1)->get_num_parameters();
        for (unsigned i = 0; i < num; i++) {
            parameter p1 = to_func_decl(n1)->get_parameter(i);
            parameter p2 = to_func_decl(n2)->get_parameter(i);
            check_parameter(p1, p2);
        }
        num = to_func_decl(n1)->get_arity();
        for (unsigned i = 0; i < num; i++) {
            ast * d1 = to_func_decl(n1)->get_domain(i);
            ast * d2 = to_func_decl(n2)->get_domain(i);
            check_ast(d1, d2);
        }
        n1 = to_func_decl(n1)->get_range();
        n2 = to_func_decl(n2)->get_range();
        goto start;
    case AST_APP:
        check_value(to_app(n1)->get_num_args(), to_app(n2)->get_num_args());
        check_value(to_app(n1)->get_depth(), to_app(n2)->get_depth());
        check_ast(to_app(n1)->get_decl(), to_app(n2)->get_decl());
        num = to_app(n1)->get_num_args();
        for (unsigned i = 0; i < num; i++) {
            expr * arg1 = to_app(n1)->get_arg(i);
            expr * arg2 = to_app(n2)->get_arg(i);
            check_ast(arg1, arg2);
        }
        UNREACHABLE();
        return false;
    case AST_QUANTIFIER:
        check_bool(to_quantifier(n1)->is_forall(), to_quantifier(n2)->is_forall());
        check_value(to_quantifier(n1)->get_num_decls(), to_quantifier(n2)->get_num_decls());
        check_value(to_quantifier(n1)->get_num_patterns(), to_quantifier(n2)->get_num_patterns());
        check_value(to_quantifier(n1)->get_num_no_patterns(), to_quantifier(n2)->get_num_no_patterns());
        check_value(to_quantifier(n1)->get_weight(), to_quantifier(n2)->get_weight());
        num = to_quantifier(n1)->get_num_decls();
        for (unsigned i = 0; i < num; i++) {
            check_symbol(to_quantifier(n1)->get_decl_name(i), to_quantifier(n2)->get_decl_name(i));
            check_ast(to_quantifier(n1)->get_decl_sort(i), to_quantifier(n2)->get_decl_sort(i));
        }
        num = to_quantifier(n1)->get_num_patterns();
        for (unsigned i = 0; i < num; i++) {
            check_ast(to_quantifier(n1)->get_pattern(i), to_quantifier(n2)->get_pattern(i));
        }
        num = to_quantifier(n1)->get_num_no_patterns();
        for (unsigned i = 0; i < num; i++) {
            check_ast(to_quantifier(n1)->get_no_pattern(i), to_quantifier(n2)->get_no_pattern(i));
        }
        n1 = to_quantifier(n1)->get_expr();
        n2 = to_quantifier(n2)->get_expr();
        goto start;
    case AST_VAR:
        check_value(to_var(n1)->get_idx(), to_var(n2)->get_idx());
        n1 = to_var(n1)->get_sort();
        n2 = to_var(n2)->get_sort();
        goto start;
    default:
        UNREACHABLE();
        return false;
    }
}
Example #21
bool QPredictLinearLearner::train(QPredictDocumentList &doc_list)
{
    uint32_t cnt;
	struct problem prob;
    struct feature_node *x_space;
    int32_t max_index;
    size_t num_space;
    const char * error_msg;
    int i, j;

    cnt = 1;
    prob.l = doc_list.size();

    prob.y = new int[prob.l];
    memset(prob.y, 0, sizeof(int) * prob.l);
    prob.x = new struct feature_node *[prob.l];
    memset(prob.x, 0, sizeof(struct feature_node *) * prob.l);

    prob.bias = m_bias;

    num_space = 0;

    QPredictDocumentListIter  end_doc_it = doc_list.end();
    for (QPredictDocumentListIter doc_it = doc_list.begin(); doc_it != end_doc_it; ++doc_it) {
        num_space += doc_it->feature_list.size() + 1;
    }

    x_space = new struct feature_node[num_space + prob.l];
    memset(x_space, 0, sizeof(struct feature_node) * (num_space + prob.l));

    max_index = 0;

    for (i = 0, j = 0; i < prob.l; i++) {
        std::vector<uint32_t>::iterator termIt;

        prob.x[i] = &x_space[j];
        prob.y[i] = doc_list[i].class_index;

        QPredictFeatureList &feature_list = doc_list[i].feature_list;
        // sort feature
        sort(feature_list.begin(), feature_list.end(), QPredictFeature::feature_compare);

        const QPredictFeatureListIter &feature_end_it = feature_list.end();
        for (QPredictFeatureListIter feature_it = feature_list.begin(); feature_it != feature_end_it; ++feature_it) {
            x_space[j].index = feature_it->id;
            x_space[j].value = feature_it->value;

            ++j;
        }

        if (j >= 1 && x_space[j - 1].index > max_index)
            max_index = x_space[j - 1].index;

        if(prob.bias >= 0)
            x_space[j++].value = prob.bias;

        x_space[j++].index = -1;

        cnt++;
    }

    if(prob.bias >= 0) {
        prob.n = max_index + 1;
        for(i = 1;i < prob.l; i++)
            (prob.x[i]-2)->index = prob.n;
        x_space[j-2].index = prob.n;
    } else {
        prob.n = max_index;
    }

    if ((error_msg = check_parameter(&prob, &m_param))) {
        //
        std::cerr << "error " << error_msg << std::endl;
    }

    m_model = ::train(&prob, &m_param);

    /*/ cross validation
	int total_correct = 0;
	int *target = (int*) malloc(sizeof(int) * prob.l);

	cross_validation(&prob,&m_param,10,target);

	for(int i=0;i<prob.l;i++)
		if(target[i] == prob.y[i])
			++total_correct;
	fprintf(stderr, "Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);

	free(target);
    */
    //destroy_param(&m_param);

    delete []prob.y;
    delete []prob.x;
    delete []x_space;

    return true;
}
Example #22
// Interface function of matlab
// now assume prhs[0]: label prhs[1]: features
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	const char *error_msg;
	// fix random seed to have same results for each run
	// (for cross validation)
	srand(1);

	// Transform the input Matrix to libsvm format
	if(nrhs > 2 && nrhs <= 6)
	{
		int err=0;

		if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1]) ) {
			mexPrintf("Error: weight vector, label vector matrix must be double\n");
			fake_answer(plhs);
			return;
		}
		if(!mxIsSingle(prhs[2])) {
			mexPrintf("Error: instance matrix must be single\n");
			fake_answer(plhs);
			return;
		}

		if(parse_command_line(nrhs, prhs, NULL))
		{
			exit_with_help();
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}

#ifdef _DENSE_REP
		if(!mxIsSparse(prhs[2]))
			err = read_problem_sparse(prhs[0], prhs[1],prhs[2]);
		else
		{
			mexPrintf("Training_instance_matrix must be dense\n");
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}
#else
		if(mxIsSparse(prhs[2]))
			err = read_problem_sparse(prhs[0], prhs[1],prhs[2]);
		else
		{
			mexPrintf("Training_instance_matrix must be sparse\n");
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}
#endif

		// xren: delete the input instance matrix to free up space
		if (nrhs==6) {
			mxArray* var=(mxArray*)prhs[5];
			int status=mexCallMATLAB(0,NULL,1, &var, "clear");
			if (status!=0) mexPrintf("Failed to delete variable %s\n",mxArrayToString(prhs[5]));
			//mxDestroyArray( (mxArray*)prhs[1] );
		}

		// train's original code
		error_msg = check_parameter(&prob, &param);

		if(err || error_msg)
		{
			if (error_msg != NULL)
				mexPrintf("Error: %s\n", error_msg);
			destroy_param(&param);
			free(prob.y);
			free(prob.x);
			if (!use_existing_space)
				free(x_space);
			fake_answer(plhs);
			return;
		}

		if(cross_validation_flag)
		{
			double *ptr;
			plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
			ptr = mxGetPr(plhs[0]);
			ptr[0] = do_cross_validation();
		}
		else
		{
			const char *error_msg;

			model_ = train(&prob, &param);
			error_msg = model_to_matlab_structure(plhs, model_);
			if(error_msg)
				mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg);
			free_and_destroy_model(&model_);
		}
		destroy_param(&param);
		free(prob.y);
		free(prob.x);
		free(prob.W);
		if (!use_existing_space)
			free(x_space);
	}
	else
	{
		exit_with_help();
		fake_answer(plhs);
		return;
	}
}
Example #23
int grab_next_line_parameter(FILE *fp, char **line, size_t *n) {
    int rval = grab_next_line(fp, line, n);
    while(!feof(fp) && !check_parameter(*line))
        rval = grab_next_line(fp, line, n);
    return (rval);
}
Example #24
/* -------------------------------------------------------------
 * MAIN:
 * Check all parameters passed on the command line, calculate
 * the date to be displayed, and display it.
 * ------------------------------------------------------------- */
int main( int argc, char **argv, char **envp ) {
   struct tm *time_var;
   time_t time_number;
   signed long time_offset;
   char *ptr;
   int i, date_override;
   char saved_command[MAX_PARM_LEN+1];
   char saved_format[MAX_MASK_LEN+1];
   char saved_startdate_override[13]; /* YYYYMMDDHHMM_ */

   /*
    * Check to see what command line parameters we have
    */
   if (argc < 2) {
      printf( "%s: (c)Mark Dickinson, 2001\n", argv[0] );
      show_syntax();
      exit( 1 );
   }

   time_offset = 0;     /* default, and start point for adjustments */
   date_override = 0;   /* use current system date and time */
   strcpy(saved_format,"YYYYMMDD"); /* default */
   i = 1;
   /* use a while loop instead of a for loop as we may
    * increment the counter ourselves */
   while ( i < argc ) {
      ptr = argv[i];
      i++;
	  if (i >= argc) {
		 printf( "Missing value for %s\n", ptr );
		 exit( 1 );
      }
      strncpy( saved_command, ptr, MAX_PARM_LEN ); 
      ptr = argv[i];
      if (strncmp("-format",saved_command,7) == 0) {
         validate_format( ptr, saved_format );
      }
	  else if (strncmp("-workingdate",saved_command,12) == 0) {
         date_override = 1;
		 strncpy( saved_startdate_override, ptr, 12 ); /* YYYYMMDDHHMM */
      }
      else {
         time_offset = time_offset + check_parameter( saved_command, ptr );
      }
      i++;
   }
    
   /*
    * Work out the new time and print the result.
    */
   if (date_override == 1) {
      /* have to get the dst flag setting for this */
      time_number = time(0);
      time_var = localtime( &time_number );
	  /* then workout the callers passed time */
      time_number = make_time( (char *)&saved_startdate_override, time_var->tm_isdst );
   }
   else {
      time_number = time(0);     /* time now in seconds from 00:00, Jan 1 1970 */
   }
   time_number = time_number + time_offset;
   if (strcmp("CTIME",saved_format) == 0) {
      printf( "%s", ctime( &time_number ) );
   }
   else {
     time_var = localtime( &time_number );   
     print_time( time_var, saved_format ); 
   }
   exit( 0 );
} /* end main */
Example #25
//---------------------------- global variables -------------------------------
int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;
	
#ifdef FIGURE56
	char test_file_name[1024];
	parse_command_line(argc, argv, input_file_name, test_file_name);
#else
	parse_command_line(argc, argv, input_file_name, model_file_name);//initialize global struct param, according to command line
	//_parse_command_line(argc, argv, input_file_name, model_file_name);//initialize global struct param, according to command line
#endif
	read_problem(input_file_name);//get all possible information about the train file into global struct prob
#ifdef FIGURE56
	read_problem_test(test_file_name);
#endif
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}
	//	struct model
//{
//	struct parameter param;
//	int nr_class;		/* number of classes */
//	int nr_feature;
//	double *w;
//	int *label;		/* label of each class */
//};
//	model_=train(&prob, &param);
//-------- allocate memory for the V matrix --------------
	int i=0;
	double * p = Malloc(double,param.col_size * prob.l);
	//srand( (unsigned)time( NULL ) );  // seed the random number generator
	for (i=0;i<param.col_size * prob.l;i++)
	{		
		p[i]=rand()/(RAND_MAX+1.0);  // generate a random number in [0,1)
		//p[i]=rand();
	}
	double ** v_pp = Malloc(double* ,prob.l);
	param.v_pp = v_pp;
	
	for (i=0;i<prob.l;i++)
		param.v_pp[i] = &p[param.col_size * i];
	model_=_train(&prob, &param);

#ifdef FIGURE56
#else
	if(save_model(model_file_name, model_))
	{
		fprintf(stderr,"can't save model to file %s\n",model_file_name);
		exit(1);
	}
#endif
	free_and_destroy_model(&model_);
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(prob.query);
	free(x_space);
	//////// free the V matrix buffers
	free(v_pp);
	free(p);
#ifdef FIGURE56
	free(probtest.y);
	free(probtest.x);
	free(x_spacetest);
#endif
	free(line);
	return 0;
}
Example #26
void LVlinear_train(lvError *lvErr, const LVlinear_problem *prob_in, const LVlinear_parameter *param_in, LVlinear_model * model_out){
	try{
		// Input verification: Nonempty problem
		if (prob_in->x == nullptr || (*(prob_in->x))->dimSize == 0)
			throw LVException(__FILE__, __LINE__, "Empty problem passed to liblinear_train.");

		// Input verification: Problem dimensions
		if ((*(prob_in->x))->dimSize != (*(prob_in->y))->dimSize)
			throw LVException(__FILE__, __LINE__, "The problem must have an equal number of labels and feature vectors (x and y).");

		uint32_t nr_nodes = (*(prob_in->y))->dimSize;

		// Input validation: Number of feature vectors too large (exceeds max signed int)
		if(nr_nodes > INT_MAX)
			throw LVException(__FILE__, __LINE__, "Number of feature vectors too large (grater than " + std::to_string(INT_MAX) + ")");

		//-- Convert problem
		auto prob = std::make_unique<problem>();
		prob->l = nr_nodes;
		prob->y = (*(prob_in->y))->elt;
		prob->n = 0; // Calculated later
		prob->bias = prob_in->bias;

		// Create an array of pointers (sparse data structure)
		auto x = std::make_unique<feature_node*[]>(nr_nodes);
		prob->x = x.get();

		auto x_in = prob_in->x;
		for (unsigned int i = 0; i < (*x_in)->dimSize; i++){
			// Assign the innermost svm_node array pointers to the array of pointers
			auto xi_in_Hdl = (*x_in)->elt[i];
			x[i] = reinterpret_cast<feature_node*>((*xi_in_Hdl)->elt);

			// Input validation: Final index -1?
			if ((*xi_in_Hdl)->elt[(*xi_in_Hdl)->dimSize - 1].index != -1)
				throw LVException(__FILE__, __LINE__, "The index of the last element of each feature vector needs to be -1 (liblinear_train).");

			// Calculate the max index
			// This detail is not exposed in LabVIEW, as setting the wrong value causes a crash
			// Second to last element should contain the max index for that feature vector (as they are in ascending order).
			auto secondToLast = (*xi_in_Hdl)->dimSize - 2; // Ignoring -1 index
			auto largestIndex = (*xi_in_Hdl)->elt[secondToLast].index;
			if (secondToLast >= 0 && largestIndex > prob->n)
				prob->n = largestIndex;
		}

		//-- Convert parameters
		auto param = std::make_unique<parameter>();
		LVConvertParameter(*param_in, *param);

		// Verify parameters
		const char * param_check = check_parameter(prob.get(), param.get());
		if (param_check != nullptr)
			throw LVException(__FILE__, __LINE__, "Parameter check failed with the following error: " + std::string(param_check));

		// Train model
		model *result = train(prob.get(), param.get());

		// Copy model to LabVIEW memory
		LVConvertModel(*result, *model_out);

		// Release memory allocated by train
		free_model_content(result);
	}
	catch (LVException &ex) {
		(*(model_out->label))->dimSize = 0;
		(*(model_out->w))->dimSize = 0;
		(*(model_out->param).weight)->dimSize = 0;
		(*(model_out->param).weight_label)->dimSize = 0;

		ex.returnError(lvErr);
	}
	catch (std::exception &ex) {
		(*(model_out->label))->dimSize = 0;
		(*(model_out->w))->dimSize = 0;
		(*(model_out->param).weight)->dimSize = 0;
		(*(model_out->param).weight_label)->dimSize = 0;

		LVException::returnStdException(lvErr, __FILE__, __LINE__, ex);
	}
	catch (...) {
		(*(model_out->label))->dimSize = 0;
		(*(model_out->w))->dimSize = 0;
		(*(model_out->param).weight)->dimSize = 0;
		(*(model_out->param).weight_label)->dimSize = 0;

		LVException ex(__FILE__, __LINE__, "Unknown exception has occurred");
		ex.returnError(lvErr);

	}
}
Example #27
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	const char *error_msg;
	srand(1);

	if(nrhs == 7) /* force alphas_in and w_in to be initialized */
	{
		int err=0;

		if(!mxIsClass(prhs[0], "single") || !mxIsClass(prhs[1], "single") || !mxIsClass(prhs[4], "single") || !mxIsClass(prhs[4], "single") || !mxIsClass(prhs[6], "single")) {
			mexPrintf("Error: label vector, instance matrix and alphas_in must be float\n");
			fake_answer(plhs);
			return;
		}

		if(parse_command_line(nrhs, prhs, NULL))
		{
			exit_with_help();
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}
#ifdef _DENSE_REP
		if(!mxIsSparse(prhs[1]))
			err = read_problem_sparse(prhs[0], prhs[1], prhs[4], prhs[5], prhs[6]);
		else
		{
			mexPrintf("Training_instance_matrix must be dense\n");
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}
#else
		if(mxIsSparse(prhs[1]))
			err = read_problem_sparse(prhs[0], prhs[1], prhs[4]);
		else
		{
			mexPrintf("Training_instance_matrix must be sparse\n");
			destroy_param(&param);
			fake_answer(plhs);
			return;
		}
#endif

		error_msg = check_parameter(&prob, &param);

		if(err || error_msg)
		{
			if (error_msg != NULL)
				mexPrintf("Error: %s\n", error_msg);
			destroy_param(&param);
			free(prob.y);
			free(prob.x);
            free(prob.alphas_in);
            free(prob.w_in);
			/*free(x_space);*/
			fake_answer(plhs);
			return;
		}
        
		if(cross_validation_flag)
		{
			float *ptr;
			plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
			ptr = (float*) mxGetPr(plhs[0]);
			ptr[0] = do_cross_validation();
		}
		else
		{            
			const char *error_msg;
			model_ = train(&prob, &param);            
            
			error_msg = model_to_matlab_structure(plhs, model_);            
            
			if(error_msg)
				mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg);
            
			destroy_model(model_);
		}
		destroy_param(&param);
		free(prob.y);
		free(prob.x);
        free(prob.alphas_in);
        free(prob.w_in);
		/*free(x_space);*/
	}
	else
	{
		exit_with_help();
		fake_answer(plhs);
		return;
	}
}
Example #28
void BoostCart::GlobalRegression(const vector<Mat_<int> >& lbf, const Mat_<double>& shape_residual) {
  Config& c = Config::GetInstance();
  const int landmark_n = c.landmark_n;
  const int n = lbf.size();
  const int m = K; // true size of local binary feature
  const int f = m*carts[0].leafNum; // full size of local binary feature
  vector<int> idx;
  // prepare linear regression X, Y
  struct feature_node** X = (struct feature_node**)malloc(n*sizeof(struct feature_node*));
  double** Y = (double**)malloc(2 * landmark_n*sizeof(double*));
  for (int i = 0; i < n; i++) {
    X[i] = (struct feature_node*)malloc((m + 1)*sizeof(struct feature_node));
    for (int j = 0; j < m; j++) {
      X[i][j].index = lbf[i](0, j) + 1; // index starts from 1
      X[i][j].value = 1.;
    }
    X[i][m].index = -1;
    X[i][m].value = -1.;
  }
  for (int i = 0; i < landmark_n; i++) {
    Y[2 * i] = (double*)malloc(n*sizeof(double));
    Y[2 * i + 1] = (double*)malloc(n*sizeof(double));
    for (int j = 0; j < n; j++) {
      Y[2 * i][j] = shape_residual(j, 2 * i);
      Y[2 * i + 1][j] = shape_residual(j, 2 * i + 1);
    }
  }
  // train every landmark
  struct problem prob;
  struct parameter param;
  prob.l = n;
  prob.n = f;
  prob.x = X;
  prob.bias = -1;
  param.solver_type = L2R_L2LOSS_SVR_DUAL;
  param.C = 1. / n;
  param.p = 0;
  param.eps = 0.0001;

  #pragma omp parallel for
  for (int i = 0; i < landmark_n; i++) {
    struct problem prob_ = prob;
    prob_.y = Y[2 * i];
    check_parameter(&prob_, &param);
    struct model *model = train(&prob_, &param);
    for (int j = 0; j < f; j++) w(j, 2 * i) = get_decfun_coef(model, j + 1, 0);
    freeModel(model);

    prob_.y = Y[2 * i + 1];
    check_parameter(&prob_, &param);
    model = train(&prob_, &param);
    for (int j = 0; j < f; j++) w(j, 2 * i + 1) = get_decfun_coef(model, j + 1, 0);
    freeModel(model);
  }

  // free
  for (int i = 0; i < n; i++) free(X[i]);
  for (int i = 0; i < 2 * landmark_n; i++) free(Y[i]);
  free(X);
  free(Y);
}
int main(int argc, char *argv[])
{
	return check_parameter(NULL);
}
int main(int argc, char **argv)
{
  const char *error_msg;
  parse_command_line(argc, argv); // also load data
  error_msg = check_parameter(prob,&param);
  
  if(error_msg)
    {
      fprintf(stderr,"Error: %s\n",error_msg);
      exit(1);
    }
	

  std::vector< std::pair<double, double> > test_errors(nb_runs);
  std::vector< std::pair<double, double> > train_errors(nb_runs);
  double trn_mean=0;
  double tst_mean=0;
  double mse_trn_mean=0;
  double mse_tst_mean=0;
  int *start = NULL;

  // perform runs
  for (int run=0; run<nb_runs; run++)
    {

      if ((trnsz>=prob->l) || (trnsz<=0))
	{
	  fprintf(stderr,"\nRun %d (from 0 to %d)\n", run, prob->l-1);

	  //train
	  model_=train(prob, &param);
	  
	  // test
	  test_errors[run]=do_predict(tprob, model_);
	  train_errors[run]=do_predict(prob, model_);
	}
      else
	{
          // select all the splits before optimizing
          if(run == 0)
            {
              start = Malloc(int,nb_runs); 
              for (int run2=0; run2<nb_runs; run2++)
                start[run2] = (rand() % (prob->l-trnsz));
            }
	  // select examples
	  fprintf(stderr,"\nRun %d (from %d to %d)\n", run, start[run], start[run]+trnsz-1);
	  struct problem* subprob=extract_subprob(prob, start[run], trnsz);
	  
	  //train
	  model_=train(subprob, &param);
	  
	  // test
	  test_errors[run]=do_predict(tprob, model_);
	  train_errors[run]=do_predict(subprob, model_);
	  free(subprob->y);
	  free(subprob->x);
	}

      tst_mean+=test_errors[run].first;
      printf("Test  classification ERROR = %g\n",test_errors[run].first);
      trn_mean+=train_errors[run].first;
      printf("Train classification ERROR = %g\n",train_errors[run].first);

      mse_tst_mean+=test_errors[run].second;
      printf("Test  normalized ACCURACY (ET requirement) = %g\n",test_errors[run].second);
      mse_trn_mean+=train_errors[run].second;
      printf("Train normalized ACCURACY (ET requirement) = %g\n",train_errors[run].second);

      //destroy model
      free_and_destroy_model(&model_);
      destroy_param(&param);
      
    }