Example #1
int main(int argc, char **argv) { 
 	
	srand(time(NULL));
	try {
		CMDLine cmdline(argc, argv);
		cout << "----------------------------------------------------------------------------" << endl;
		cout << "libFM" << endl;
		cout << "  Version: 1.40" << endl;
		cout << "  Author:  Steffen Rendle, [email protected]" << endl;
		cout << "  WWW:     http://www.libfm.org/" << endl;
		cout << "  License: Free for academic use. See license.txt." << endl;
		cout << "----------------------------------------------------------------------------" << endl;
		
		const string param_task		= cmdline.registerParameter("task", "r=regression, c=binary classification [MANDATORY]");
		const string param_meta_file	= cmdline.registerParameter("meta", "filename for meta information about data set");
		const string param_train_file	= cmdline.registerParameter("train", "filename for training data [MANDATORY]");
		const string param_test_file	= cmdline.registerParameter("test", "filename for test data [MANDATORY]");
		const string param_val_file	= cmdline.registerParameter("validation", "filename for validation data (only for SGDA)");
		const string param_out		= cmdline.registerParameter("out", "filename for output");

		const string param_dim		= cmdline.registerParameter("dim", "'k0,k1,k2': k0=use bias, k1=use 1-way interactions, k2=dim of 2-way interactions; default=1,1,8");
		const string param_regular		= cmdline.registerParameter("regular", "'r0,r1,r2' for SGD and ALS: r0=bias regularization, r1=1-way regularization, r2=2-way regularization");
		const string param_init_stdev	= cmdline.registerParameter("init_stdev", "stdev for initialization of 2-way factors; default=0.1");
		const string param_num_iter	= cmdline.registerParameter("iter", "number of iterations; default=100");
		const string param_learn_rate	= cmdline.registerParameter("learn_rate", "learn_rate for SGD; default=0.1");

		const string param_method		= cmdline.registerParameter("method", "learning method (SGD, SGDA, ALS, MCMC); default=MCMC");

		const string param_verbosity	= cmdline.registerParameter("verbosity", "how much info to print; default=0");
		const string param_r_log		= cmdline.registerParameter("rlog", "write measurements within iterations to a file; default=''");
		const string param_help            = cmdline.registerParameter("help", "this screen");

		const string param_relation	= cmdline.registerParameter("relation", "BS: filenames for the relations, default=''");

		const string param_cache_size = cmdline.registerParameter("cache_size", "cache size for data storage (only applicable if data is in binary format), default=infty");


		const string param_do_sampling	= "do_sampling";
		const string param_do_multilevel	= "do_multilevel";
		const string param_num_eval_cases  = "num_eval_cases";

		if (cmdline.hasParameter(param_help) || (argc == 1)) {
			cmdline.print_help();
			return 0;
		}
		cmdline.checkParameters();

		if (! cmdline.hasParameter(param_method)) { cmdline.setValue(param_method, "mcmc"); }
		if (! cmdline.hasParameter(param_init_stdev)) { cmdline.setValue(param_init_stdev, "0.1"); }
		if (! cmdline.hasParameter(param_dim)) { cmdline.setValue(param_dim, "1,1,8"); }

		if (! cmdline.getValue(param_method).compare("als")) { // ALS is MCMC without sampling and without hyperparameter inference
			cmdline.setValue(param_method, "mcmc");
			if (! cmdline.hasParameter(param_do_sampling)) { cmdline.setValue(param_do_sampling, "0"); }
			if (! cmdline.hasParameter(param_do_multilevel)) { cmdline.setValue(param_do_multilevel, "0"); }
		} 

		// (1) Load the data
		cout << "Loading train...\t" << endl;
		Data train(
			cmdline.getValue(param_cache_size, 0),
			! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
			! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda")) // no transpose data for sgd, sgda
		);
		train.load(cmdline.getValue(param_train_file));
		if (cmdline.getValue(param_verbosity, 0) > 0) { train.debug(); }

		cout << "Loading test... \t" << endl;
		Data test(
			cmdline.getValue(param_cache_size, 0),
			! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
			! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda")) // no transpose data for sgd, sgda
		);
		test.load(cmdline.getValue(param_test_file));
		if (cmdline.getValue(param_verbosity, 0) > 0) { test.debug(); }

		Data* validation = NULL;
		if (cmdline.hasParameter(param_val_file)) {
			if (cmdline.getValue(param_method).compare("sgda")) {
				cout << "WARNING: Validation data is only used for SGDA. The data is ignored." << endl;
			} else {
				cout << "Loading validation set...\t" << endl;
				validation = new Data(
					cmdline.getValue(param_cache_size, 0),
					! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
					! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda")) // no transpose data for sgd, sgda
				);
				validation->load(cmdline.getValue(param_val_file));
				if (cmdline.getValue(param_verbosity, 0) > 0) { validation->debug(); }
			}
		}

		DVector<RelationData*> relation;
		// (1.2) Load relational data
		{
			vector<string> rel = cmdline.getStrValues(param_relation);
		
			cout << "#relations: " << rel.size() << endl;
			relation.setSize(rel.size());
			train.relation.setSize(rel.size());
			test.relation.setSize(rel.size());
			for (uint i = 0; i < rel.size(); i++) {
				 relation(i) = new RelationData(
					cmdline.getValue(param_cache_size, 0),
					! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
					! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda")) // no transpose data for sgd, sgda
				);
				relation(i)->load(rel[i]);
				train.relation(i).data = relation(i);
				test.relation(i).data = relation(i);
				train.relation(i).load(rel[i] + ".train", train.num_cases);
				test.relation(i).load(rel[i] + ".test", test.num_cases);
			}
		}
		
		// (1.3) Load meta data
		cout << "Loading meta data...\t" << endl;
		
		// (main table)
		uint num_all_attribute = max(train.num_feature, test.num_feature);
		if (validation != NULL) {
			num_all_attribute = max(num_all_attribute, (uint) validation->num_feature);
		}
		DataMetaInfo meta_main(num_all_attribute);
		if (cmdline.hasParameter(param_meta_file)) {
			meta_main.loadGroupsFromFile(cmdline.getValue(param_meta_file));
		}
		
		// build the joined meta table
		for (uint r = 0; r < train.relation.dim; r++) {
			train.relation(r).data->attr_offset = num_all_attribute;
			num_all_attribute += train.relation(r).data->num_feature;
		}
		DataMetaInfo meta(num_all_attribute);
		{
			meta.num_attr_groups = meta_main.num_attr_groups;
			for (uint r = 0; r < relation.dim; r++) 
				meta.num_attr_groups += relation(r)->meta->num_attr_groups;
			
			meta.num_attr_per_group.setSize(meta.num_attr_groups);
			meta.num_attr_per_group.init(0);		
			for (uint i = 0; i < meta_main.attr_group.dim; i++) {
				meta.attr_group(i) = meta_main.attr_group(i);
				meta.num_attr_per_group(meta.attr_group(i))++;
			}

			uint attr_cntr = meta_main.attr_group.dim;
			uint attr_group_cntr = meta_main.num_attr_groups;
			for (uint r = 0; r < relation.dim; r++) {
				for (uint i = 0; i < relation(r)->meta->attr_group.dim; i++) {
					meta.attr_group(i+attr_cntr) = attr_group_cntr + relation(r)->meta->attr_group(i);
					meta.num_attr_per_group(attr_group_cntr + relation(r)->meta->attr_group(i))++;
				}
				attr_cntr += relation(r)->meta->attr_group.dim;
				attr_group_cntr += relation(r)->meta->num_attr_groups;
			}
			if (cmdline.getValue(param_verbosity, 0) > 0)
				{ meta.debug(); }
	
		}
		meta.num_relations = train.relation.dim;

		// (2) Setup the factorization machine
		fm_model fm;
		{
			fm.num_attribute = num_all_attribute;
			fm.init_stdev = cmdline.getValue(param_init_stdev, 0.1);
			// set the number of dimensions in the factorization
			{ 
				vector<int> dim = cmdline.getIntValues(param_dim);
				assert(dim.size() == 3);
				fm.k0 = dim[0] != 0;
				fm.k1 = dim[1] != 0;
				fm.num_factor = dim[2];					
			}			
			fm.init();		
			
		}

		// (3) Setup the learning method:
		fm_learn* fml;
		if (! cmdline.getValue(param_method).compare("sgd")) {
	 		fml = new fm_learn_sgd_element();
			((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);

		} else if (! cmdline.getValue(param_method).compare("sgda")) {
			assert(validation != NULL);		
	 		fml = new fm_learn_sgd_element_adapt_reg();
			((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
			((fm_learn_sgd_element_adapt_reg*)fml)->validation = validation;

		} else if (! cmdline.getValue(param_method).compare("mcmc")) {
			fm.w.init_normal(fm.init_mean, fm.init_stdev);
	 		fml = new fm_learn_mcmc_simultaneous();
			fml->validation = validation;
			((fm_learn_mcmc*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
			((fm_learn_mcmc*)fml)->num_eval_cases = cmdline.getValue(param_num_eval_cases, test.num_cases);
		
			((fm_learn_mcmc*)fml)->do_sample = cmdline.getValue(param_do_sampling, true);
			((fm_learn_mcmc*)fml)->do_multilevel = cmdline.getValue(param_do_multilevel, true);
		} else {
			throw "unknown method";
		}
		fml->fm = &fm;
		fml->max_target = train.max_target;
		fml->min_target = train.min_target;
		fml->meta = &meta;
		if (! cmdline.getValue("task").compare("r") ) {
			fml->task = 0;
		} else if (! cmdline.getValue("task").compare("c") ) {
			fml->task = 1;
			for (uint i = 0; i < train.target.dim; i++) 
				{ if (train.target(i) <= 0.0) { train.target(i) = -1.0; } else {train.target(i) = 1.0; } }
			for (uint i = 0; i < test.target.dim; i++) 
				{ if (test.target(i) <= 0.0) { test.target(i) = -1.0; } else {test.target(i) = 1.0; } }
			if (validation != NULL) {
				for (uint i = 0; i < validation->target.dim; i++) 
					{ if (validation->target(i) <= 0.0) { validation->target(i) = -1.0; } else {validation->target(i) = 1.0; } }
			}
		} else {
			throw "unknown task";
		}
		
		// (4) init the logging
		RLog* rlog = NULL;	 
		if (cmdline.hasParameter(param_r_log)) {
			ofstream* out_rlog = NULL;
			string r_log_str = cmdline.getValue(param_r_log);
	 		out_rlog = new ofstream(r_log_str.c_str());
	 		if (! out_rlog->is_open())	{
	 			throw "Unable to open file " + r_log_str;
	 		}
	 		cout << "logging to " << r_log_str.c_str() << endl;
			rlog = new RLog(out_rlog);
	 	}
	 	
		fml->log = rlog;
		fml->init();
		if (! cmdline.getValue(param_method).compare("mcmc")) {
			// set the regularization; for als and mcmc this can be individual per group
			{ 
	 			vector<double> reg = cmdline.getDblValues(param_regular);
				assert((reg.size() == 0) || (reg.size() == 1) || (reg.size() == 3) || (reg.size() == (1+meta.num_attr_groups*2)));
				if (reg.size() == 0) {
					fm.reg0 = 0.0;
					fm.regw = 0.0;
					fm.regv = 0.0;
					((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
				} else if (reg.size() == 1) {
					fm.reg0 = reg[0];
					fm.regw = reg[0];
					fm.regv = reg[0];
					((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);					
				} else if (reg.size() == 3) {
					fm.reg0 = reg[0];
					fm.regw = reg[1];
					fm.regv = reg[2];
					((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
				} else {
					fm.reg0 = reg[0];
					fm.regw = 0.0;
					fm.regv = 0.0;
					int j = 1;
					for (uint g = 0; g < meta.num_attr_groups; g++) {
						((fm_learn_mcmc*)fml)->w_lambda(g) = reg[j];
						j++;
					}
					for (uint g = 0; g < meta.num_attr_groups; g++) {
						for (int f = 0; f < fm.num_factor; f++) {
							((fm_learn_mcmc*)fml)->v_lambda(g,f) = reg[j];
						}
 						j++;
					}
				}

			}
		} else {
			// set the regularization; for standard SGD, groups are not supported
			{ 
	 			vector<double> reg = cmdline.getDblValues(param_regular);
				assert((reg.size() == 0) || (reg.size() == 1) || (reg.size() == 3));
				if (reg.size() == 0) {
					fm.reg0 = 0.0;
					fm.regw = 0.0;
					fm.regv = 0.0;
				} else if (reg.size() == 1) {
					fm.reg0 = reg[0];
					fm.regw = reg[0];
					fm.regv = reg[0];
				} else {
					fm.reg0 = reg[0];
					fm.regw = reg[1];
					fm.regv = reg[2];
				}		
			}
		}
		{
			fm_learn_sgd* fmlsgd= dynamic_cast<fm_learn_sgd*>(fml); 
			if (fmlsgd) {
				// set the learning rates (individual per layer)
				{ 
		 			vector<double> lr = cmdline.getDblValues(param_learn_rate);
					assert((lr.size() == 1) || (lr.size() == 3));
					if (lr.size() == 1) {
						fmlsgd->learn_rate = lr[0];
						fmlsgd->learn_rates.init(lr[0]);
					} else {
						fmlsgd->learn_rate = 0;
						fmlsgd->learn_rates(0) = lr[0];
						fmlsgd->learn_rates(1) = lr[1];
						fmlsgd->learn_rates(2) = lr[2];
					}		
				}
			}
		}
		if (rlog != NULL) {
			rlog->init();
		}
		
		if (cmdline.getValue(param_verbosity, 0) > 0) { 
			fm.debug();			
			fml->debug();			
		}	

		// () learn		
		fml->learn(train, test);

		// () Prediction at the end  (not for mcmc and als)
		if (cmdline.getValue(param_method).compare("mcmc")) {
			cout << "Final\t" << "Train=" << fml->evaluate(train) << "\tTest=" << fml->evaluate(test) << endl;	
		}

		// () Save prediction
		if (cmdline.hasParameter(param_out)) {
			DVector<double> pred;
			pred.setSize(test.num_cases);
			fml->predict(test, pred);
			pred.save(cmdline.getValue(param_out));	
		}
				 	

	} catch (string &e) {
		cerr << endl << "ERROR: " << e << endl;
	} catch (char const* &e) {
		cerr << endl << "ERROR: " << e << endl;
	}


}
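A note on the string-comparison idiom used throughout these examples: std::string::compare returns 0 on equality, so !s.compare(x) tests s == x, and the doubled negation ! (!s.compare(x)) in the Data constructor arguments tests s != x. A minimal standalone sketch of the idiom (an illustration, not libFM code):

#include <cassert>
#include <string>

int main() {
	std::string method = "mcmc";
	bool is_mcmc = !method.compare("mcmc");     // compare() == 0 on a match
	bool has_x   = !(!method.compare("mcmc"));  // "keep original data" flag: true unless method is mcmc
	assert(is_mcmc && !has_x);
	assert(has_x == (method != "mcmc"));        // the equivalent, more readable form
	return 0;
}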
Example #2
File: libfm.cpp Project: doobwa/pipeline
int main(int argc, char **argv) { 
 	
	srand(time(NULL));
	try {
		CMDLine cmdline(argc, argv);
		std::cout << "libFM" << std::endl;
		std::cout << "  Version: 1.10" << std::endl;
		std::cout << "  Author:  Steffen Rendle, [email protected], http://www.libfm.org/" << std::endl;
		std::cout << "  License: Free for academic use. See license.txt." << std::endl;
		std::cout << "----------------------------------------------------------------------------" << std::endl;
		
		const std::string param_task		= cmdline.registerParameter("task", "r=regression, c=binary classification [MANDATORY]");
		const std::string param_train_file	= cmdline.registerParameter("train", "filename for training data [MANDATORY]");
		const std::string param_test_file	= cmdline.registerParameter("test", "filename for test data [MANDATORY]");
		const std::string param_out		= cmdline.registerParameter("out", "filename for output");

		const std::string param_dim		= cmdline.registerParameter("dim", "'k0,k1,k2': k0=use bias, k1=use 1-way interactions, k2=dim of 2-way interactions [MANDATORY]");
		const std::string param_regular		= cmdline.registerParameter("regular", "'r0,r1,r2': r0=bias regularization, r1=1-way regularization, r2=2-way regularization [MANDATORY]");
		const std::string param_init_stdev	= cmdline.registerParameter("init_stdev", "stdev for initialization of 2-way factors; default=0.01");
		const std::string param_num_iter	= cmdline.registerParameter("iter", "number of iterations for SGD; default=100");
		const std::string param_learn_rate	= cmdline.registerParameter("learn_rate", "learn_rate for SGD; default=0.1");
		const std::string param_method		= cmdline.registerParameter("method", "learning method (SGD or ALS); default=SGD");
	
		const std::string param_verbosity	= cmdline.registerParameter("verbosity", "how much info to print; default=0");
		const std::string param_r_log		= cmdline.registerParameter("rlog", "write measurements within iterations to a file; default=''");
		const std::string param_help            = cmdline.registerParameter("help", "this screen");

		if (cmdline.hasParameter(param_help) || (argc == 1)) {
			cmdline.print_help();
			return 0;
		}
		cmdline.checkParameters();

		// (1) Load the data
		std::cout << "Loading train...\t";
		Data train;
		train.load(cmdline.getValue(param_train_file));
		if (cmdline.getValue(param_verbosity, 0) > 0) { train.debug(); }
		std::cout << "Loading test... \t";
		Data test;
		test.load(cmdline.getValue(param_test_file));
		if (cmdline.getValue(param_verbosity, 0) > 0) { test.debug(); }
		
		// (2) Setup the factorization machine
		fm_model fm;
		{
			fm.num_attribute = max(train.num_feature, test.num_feature);
			fm.init_stdev = cmdline.getValue(param_init_stdev, 0.01);
			// set the number of dimensions in the factorization
			{ 
				vector<int> dim = cmdline.getIntValues(param_dim);
				assert(dim.size() == 3);
				fm.k0 = dim[0] != 0;
				fm.k1 = dim[1] != 0;
				fm.num_factor = dim[2];					
			}
		
			
			fm.init();		
			// set the regularization
			{ 
	 			vector<double> reg = cmdline.getDblValues(param_regular);
				assert(reg.size() == 3);
				fm.reg0 = reg[0];
				fm.regw.init(reg[1]);
				fm.regv.init(reg[2]);					
			}
		}
		// (3) Setup the learning method:
		fm_learn* fml;
		if (! cmdline.getValue(param_method, "sgd").compare("sgd")) { // compare() is case-sensitive, so the default value must be lowercase to match
	 		fml = new fm_learn_sgd_element();
			((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
			((fm_learn_sgd*)fml)->learn_rate = cmdline.getValue(param_learn_rate, 0.1);			
		} else if (! cmdline.getValue(param_method).compare("als")) {
	 		fml = new fm_learn_als_simultaneous();
			((fm_learn_als*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
			if (cmdline.getValue("task").compare("r") ) {
				throw "ALS can only solve regression tasks.";
			}
		} else {
			throw "unknown method";
		}
		fml->fm = &fm;
		fml->max_target = train.max_target;
		fml->min_target = train.min_target;
		if (! cmdline.getValue("task").compare("r") ) {
			fml->task = 0;
		} else if (! cmdline.getValue("task").compare("c") ) {
			fml->task = 1;
			for (uint i = 0; i < train.target.dim; i++) { if (train.target(i) <= 0.0) { train.target(i) = -1.0; } else {train.target(i) = 1.0; } }
			for (uint i = 0; i < test.target.dim; i++) { if (test.target(i) <= 0.0) { test.target(i) = -1.0; } else {test.target(i) = 1.0; } }
		} else {
			throw "unknown task";
		}

		// (4) init the logging
		RLog* rlog = NULL;	 
		if (cmdline.hasParameter(param_r_log)) {
			ofstream* out_rlog = NULL;
			std::string r_log_str = cmdline.getValue(param_r_log);
	 		out_rlog = new ofstream(r_log_str.c_str());
	 		if (! out_rlog->is_open())	{
	 			throw "Unable to open file " + r_log_str;
	 		}
	 		std::cout << "logging to " << r_log_str.c_str() << std::endl;
			rlog = new RLog(out_rlog);
	 	}
	 	
		fml->log = rlog;
		fml->init();
		if (rlog != NULL) {
			rlog->init();
		}
		
		if (cmdline.getValue(param_verbosity, 0) > 0) { 
			fm.debug();			
			fml->debug();			
		}	

		// () learn		
		fml->learn(train, test);

		// () Prediction
		std::cout << "Final\t" << "Train=" << fml->evaluate(train) << "\tTest=" << fml->evaluate(test) << std::endl;	

		// () Save prediction
		if (cmdline.hasParameter(param_out)) {
			DVector<double> pred;
			pred.setSize(test.data.dim);
			fml->predict(test, pred);
			pred.save(cmdline.getValue(param_out));	
		}
				 	

	} catch (std::string &e) {
		std::cerr << e << std::endl;
	} catch (char const* &e) {
		std::cerr << e << std::endl;
	}

}
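Both drivers binarize the targets for classification (task "c") by mapping every target <= 0 to -1.0 and everything else to +1.0. A self-contained sketch of that transformation (an illustrative helper, not part of libFM):

#include <cassert>
#include <vector>

// Map raw targets to the {-1, +1} labels the classification task expects.
static void binarize_targets(std::vector<double>& target) {
	for (double& t : target) { t = (t <= 0.0) ? -1.0 : 1.0; }
}

int main() {
	std::vector<double> target = {0.0, 3.5, -2.0, 1e-9};
	binarize_targets(target);
	assert(target[0] == -1.0 && target[1] == 1.0);
	assert(target[2] == -1.0 && target[3] == 1.0);
	return 0;
}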
Example #3
int main(int argc, char **argv) { 
 	
	try {
		CMDLine cmdline(argc, argv);
		std::cout << "----------------------------------------------------------------------------" << std::endl;
		std::cout << "libFM" << std::endl;
		std::cout << "  Version: 1.4.2" << std::endl;
		std::cout << "  Author:  Steffen Rendle, [email protected]" << std::endl;
		std::cout << "  WWW:     http://www.libfm.org/" << std::endl;
		std::cout << "This program comes with ABSOLUTELY NO WARRANTY; for details see license.txt." << std::endl;
		std::cout << "This is free software, and you are welcome to redistribute it under certain" << std::endl;
		std::cout << "conditions; for details see license.txt." << std::endl;
		std::cout << "----------------------------------------------------------------------------" << std::endl;

		const std::string param_task		= cmdline.registerParameter("task", "r=regression, c=binary classification [MANDATORY]");
		const std::string param_meta_file	= cmdline.registerParameter("meta", "filename for meta information about data set");
		const std::string param_train_file	= cmdline.registerParameter("train", "filename for training data [MANDATORY]");
		const std::string param_test_file	= cmdline.registerParameter("test", "filename for test data [MANDATORY]");
		const std::string param_val_file	= cmdline.registerParameter("validation", "filename for validation data (only for SGDA)");
		const std::string param_out		= cmdline.registerParameter("out", "filename for output");

		const std::string param_dim		= cmdline.registerParameter("dim", "'k0,k1,k2': k0=use bias, k1=use 1-way interactions, k2=dim of 2-way interactions; default=1,1,8");
		const std::string param_regular		= cmdline.registerParameter("regular", "'r0,r1,r2' for SGD and ALS: r0=bias regularization, r1=1-way regularization, r2=2-way regularization");
		const std::string param_init_stdev	= cmdline.registerParameter("init_stdev", "stdev for initialization of 2-way factors; default=0.1");
		const std::string param_num_iter	= cmdline.registerParameter("iter", "number of iterations; default=100");
		const std::string param_learn_rate	= cmdline.registerParameter("learn_rate", "learn_rate for SGD; default=0.1");

		const std::string param_method		= cmdline.registerParameter("method", "learning method (SGD, SGDA, ALS, MCMC, BPR, BPRA); default=MCMC");

		const std::string param_verbosity	= cmdline.registerParameter("verbosity", "how much info to print; default=0");
		const std::string param_r_log		= cmdline.registerParameter("rlog", "write measurements within iterations to a file; default=''");
		const std::string param_seed		= cmdline.registerParameter("seed", "integer value, default=None");

		const std::string param_help            = cmdline.registerParameter("help", "this screen");

		const std::string param_relation	= cmdline.registerParameter("relation", "BS: filenames for the relations, default=''");

		const std::string param_cache_size = cmdline.registerParameter("cache_size", "cache size for data storage (only applicable if data is in binary format), default=infty");

		//FABIO PARAMETERS for BPR
		const std::string param_neg_sample = cmdline.registerParameter("neg_sample", "number of the negative pair samples drawn for each training observation, default 1 (only for bpr or bpra)");
		const std::string param_out_conv = cmdline.registerParameter("out_conv", "filename for output the convergence info (only for bpr or bpra)");
		const std::string param_threads = cmdline.registerParameter("threads", "number of threads (only for bpr or bpra)");
		const std::string param_out_vectors = cmdline.registerParameter("out_vectors", "filename for output the latent vectors");
		const std::string param_resume_state = cmdline.registerParameter("resume_state", "files with the vectors to resume the state of the FM");
		//...output ranked list
		const std::string param_top_k = cmdline.registerParameter("top_k", "number of recommendation for bpr or bpra, default 100");
		const std::string param_list_id_output = cmdline.registerParameter("list_id_output", "list of target ids (in FIXED block), comma separated without spaces, to compute output ranked list for, default all (only for bpr or bpra)");
		const std::string param_out_ranked_list_dir= cmdline.registerParameter("out_ranked_list_dir", "directory where to store the output ranked list, one file for each target id (only for bpr or bpra)");

		const std::string param_do_sampling	= "do_sampling";
		const std::string param_do_multilevel	= "do_multilevel";
		const std::string param_num_eval_cases  = "num_eval_cases";

		if (cmdline.hasParameter(param_help) || (argc == 1)) {
			cmdline.print_help();
			return 0;
		}

		cmdline.checkParameters();
		int NUM_THREADS = cmdline.getValue(param_threads, 1);

		// Seed
		long int seed = cmdline.getValue(param_seed, time(NULL));
		srand ( seed );

		if (! cmdline.hasParameter(param_method)) { cmdline.setValue(param_method, "mcmc"); }
		if (! cmdline.hasParameter(param_init_stdev)) { cmdline.setValue(param_init_stdev, "0.1"); }
		if (! cmdline.hasParameter(param_dim)) { cmdline.setValue(param_dim, "1,1,8"); }
		if (! cmdline.hasParameter(param_learn_rate)) { cmdline.setValue(param_learn_rate, "0.1"); }

		if (! cmdline.getValue(param_method).compare("als")) { // ALS is MCMC without sampling and without hyperparameter inference
			cmdline.setValue(param_method, "mcmc");
			if (! cmdline.hasParameter(param_do_sampling)) { cmdline.setValue(param_do_sampling, "0"); }
			if (! cmdline.hasParameter(param_do_multilevel)) { cmdline.setValue(param_do_multilevel, "0"); }
		}

		// (1) Load the data
		std::cout << "Loading train...\t" << std::endl;
		Data train(
			cmdline.getValue(param_cache_size, 0),
			! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
			! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda") || !cmdline.getValue(param_method).compare("bpr") || !cmdline.getValue(param_method).compare("bpra")) // no transpose data for sgd, sgda, bpr, bpra
		);
		train.load(cmdline.getValue(param_train_file));
		if (cmdline.getValue(param_verbosity, 0) > 0) { train.debug(); }

		std::cout << "Loading test... \t" << std::endl;
		Data test(
			cmdline.getValue(param_cache_size, 0),
			! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
			! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda") || !cmdline.getValue(param_method).compare("bpr") || !cmdline.getValue(param_method).compare("bpra")) // no transpose data for sgd, sgda, bpr, bpra
		);
		test.load(cmdline.getValue(param_test_file));
		if (cmdline.getValue(param_verbosity, 0) > 0) { test.debug(); }

		Data* validation = NULL;
		if (cmdline.hasParameter(param_val_file)) {
			if (cmdline.getValue(param_method).compare("sgda") && cmdline.getValue(param_method).compare("bpra")) {
				std::cout << "WARNING: Validation data is only used for SGDA and BPRA. The data is ignored." << std::endl;
			} else {
				std::cout << "Loading validation set...\t" << std::endl;
				validation = new Data(
					cmdline.getValue(param_cache_size, 0),
					! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
					! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda") || !cmdline.getValue(param_method).compare("bpr") || !cmdline.getValue(param_method).compare("bpra")) // no transpose data for sgd, sgda, bpr, bpra
				);
				validation->load(cmdline.getValue(param_val_file));
				if (cmdline.getValue(param_verbosity, 0) > 0) { validation->debug(); }
			}
		}

		DVector<RelationData*> relation;
		// (1.2) Load relational data
		{
			vector<std::string> rel = cmdline.getStrValues(param_relation);

			std::cout << "#relations: " << rel.size() << std::endl;
			relation.setSize(rel.size());
			train.relation.setSize(rel.size());
			test.relation.setSize(rel.size());
			for (uint i = 0; i < rel.size(); i++) {
				 relation(i) = new RelationData(
					cmdline.getValue(param_cache_size, 0),
					! (!cmdline.getValue(param_method).compare("mcmc")), // no original data for mcmc
					! (!cmdline.getValue(param_method).compare("sgd") || !cmdline.getValue(param_method).compare("sgda") || !cmdline.getValue(param_method).compare("bpr") || !cmdline.getValue(param_method).compare("bpra")) // no transpose data for sgd, sgda, bpr, bpra
				);
				relation(i)->load(rel[i]);
				train.relation(i).data = relation(i);
				test.relation(i).data = relation(i);
				train.relation(i).load(rel[i] + ".train", train.num_cases);
				test.relation(i).load(rel[i] + ".test", test.num_cases);

				if (cmdline.hasParameter(param_val_file) && validation != NULL){ // validation stays NULL when -validation is ignored (non-SGDA/BPRA methods)
					std::cout << "Loading relations validation set...\t" << std::endl;
					train.relation(i).loadValidation(rel[i] + ".validation", validation->num_cases);
				}
			}
		}

		// (1.3) Load meta data
		std::cout << "Loading meta data...\t" << std::endl;

		// (main table)
		uint num_all_attribute = std::max(train.num_feature, test.num_feature);
		if (validation != NULL) {
			num_all_attribute = std::max(num_all_attribute, (uint) validation->num_feature);
		}
		DataMetaInfo meta_main(num_all_attribute);
		if (cmdline.hasParameter(param_meta_file)) {
			meta_main.loadGroupsFromFile(cmdline.getValue(param_meta_file));
		}

		// build the joined meta table
		for (uint r = 0; r < train.relation.dim; r++) {
			train.relation(r).data->attr_offset = num_all_attribute;
			num_all_attribute += train.relation(r).data->num_feature;
		}
		DataMetaInfo meta(num_all_attribute);
		{
			uint attr_cntr = 0;
			int attr_group_cntr = 0;
			meta.num_attr_groups = 0;
			if (cmdline.hasParameter(param_meta_file)) {
				meta.num_attr_groups = meta_main.num_attr_groups;
			}
			for (uint r = 0; r < relation.dim; r++) {
				meta.num_attr_groups += relation(r)->meta->num_attr_groups;
			}
			// size and zero the per-group counters before any increments;
			// incrementing first and calling init(0) afterwards would discard the counts
			meta.num_attr_per_group.setSize(meta.num_attr_groups);
			meta.num_attr_per_group.init(0);

			if (cmdline.hasParameter(param_meta_file)) {
				for (uint i = 0; i < meta_main.attr_group.dim; i++) {
					meta.attr_group(i) = meta_main.attr_group(i);
					meta.num_attr_per_group(meta.attr_group(i))++;
				}
				attr_cntr = meta_main.attr_group.dim;
				attr_group_cntr = meta_main.num_attr_groups;
			}

			for (uint r = 0; r < relation.dim; r++) {
				for (uint i = 0; i < relation(r)->meta->attr_group.dim; i++) {
					meta.attr_group(i+attr_cntr) = attr_group_cntr + relation(r)->meta->attr_group(i);
					meta.num_attr_per_group(attr_group_cntr + relation(r)->meta->attr_group(i))++;
				}
				attr_cntr += relation(r)->meta->attr_group.dim;
				attr_group_cntr += relation(r)->meta->num_attr_groups;
			}
			if (cmdline.getValue(param_verbosity, 0) > 0) { meta.debug(); }

		}
		meta.num_relations = train.relation.dim;

		// (2) Setup the factorization machine
		fm_model fm;
		{
			fm.num_attribute = num_all_attribute;
			fm.init_stdev = cmdline.getValue(param_init_stdev, 0.1);
			// set the number of dimensions in the factorization
			{
				vector<int> dim = cmdline.getIntValues(param_dim);
				assert(dim.size() == 3);
				fm.k0 = dim[0] != 0;
				fm.k1 = dim[1] != 0;
				fm.num_factor = dim[2];
			}
			fm.init();
		}

		//(2.1 fabio) resume state from file [IT MUST HAVE BEEN TRAINED WITH THE SAME PARAMETERS]
		if (cmdline.hasParameter(param_resume_state)) {
			std::cout << "Resuming state from file..." << std::endl;
			fm.resumeState(cmdline.getValue(param_resume_state));
		}

		// (3) Setup the learning method:
		fm_learn* fml;
		if (! cmdline.getValue(param_method).compare("sgd")) {
	 		fml = new fm_learn_sgd_element();
			((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);

		} else if (! cmdline.getValue(param_method).compare("sgda")) {
			assert(validation != NULL);
	 		fml = new fm_learn_sgd_element_adapt_reg();
			((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
			((fm_learn_sgd_element_adapt_reg*)fml)->validation = validation;

		} else if (! cmdline.getValue(param_method).compare("mcmc")) {
			fm.w.init_normal(fm.init_mean, fm.init_stdev);
	 		fml = new fm_learn_mcmc_simultaneous();
			fml->validation = validation;
			((fm_learn_mcmc*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
			((fm_learn_mcmc*)fml)->num_eval_cases = cmdline.getValue(param_num_eval_cases, test.num_cases);

			((fm_learn_mcmc*)fml)->do_sample = cmdline.getValue(param_do_sampling, true);
			((fm_learn_mcmc*)fml)->do_multilevel = cmdline.getValue(param_do_multilevel, true);
		}


		///BAYESIAN PROBABILISTIC LEARNING
		else if (! cmdline.getValue(param_method).compare("bpr")) { //FABIO PETRONI modification
			if (relation.dim>0){
				if (NUM_THREADS>1){
					//PARALLEL BLOCK BPR
					fml = new fm_learn_sgd_element_BPR_blocks_parallel();
					((fm_learn_sgd_element_BPR_blocks_parallel*)fml)->num_neg_samples = cmdline.getValue(param_neg_sample, 1);
					((fm_learn_sgd_element_BPR_blocks_parallel*)fml)->NUM_THREADS = NUM_THREADS;
					((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
					if (cmdline.hasParameter(param_out_conv)) {
						((fm_learn_sgd_element_BPR_blocks_parallel*)fml)->file_out_conv = cmdline.getValue(param_out_conv);
					}
				}
				else{
					//BLOCK BPR
					fml = new fm_learn_sgd_element_BPR_blocks();
					((fm_learn_sgd_element_BPR_blocks*)fml)->num_neg_samples = cmdline.getValue(param_neg_sample, 1);
					((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
					if (cmdline.hasParameter(param_out_conv)) {
						((fm_learn_sgd_element_BPR_blocks*)fml)->file_out_conv = cmdline.getValue(param_out_conv);
					}
				}
			}
			else{
				throw "bpr error. For BPR --relation is needed";
			}
		}
		else if (! cmdline.getValue(param_method).compare("bpra")) { //FABIO PETRONI modification
			if (NUM_THREADS>1){
				//PARALLEL BLOCK BPRA (with adaptive regularization)
				fml = new fm_learn_sgd_element_BPR_blocks_adapt_reg_parallel();
				((fm_learn_sgd_element_BPR_blocks_adapt_reg_parallel*)fml)->num_neg_samples = cmdline.getValue(param_neg_sample, 1);
				((fm_learn_sgd_element_BPR_blocks_adapt_reg_parallel*)fml)->NUM_THREADS = NUM_THREADS;
				((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
				if (cmdline.hasParameter(param_out_conv)) {
					((fm_learn_sgd_element_BPR_blocks_adapt_reg_parallel*)fml)->file_out_conv = cmdline.getValue(param_out_conv);
				}
			}
			else{
				// BLOCK BPRA (with adaptive regularization)
				fml = new fm_learn_sgd_element_BPR_blocks_adapt_reg();
				((fm_learn_sgd_element_BPR_blocks_adapt_reg*)fml)->num_neg_samples = cmdline.getValue(param_neg_sample, 1);
				((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
				if (cmdline.hasParameter(param_out_conv)) {
					((fm_learn_sgd_element_BPR_blocks_adapt_reg*)fml)->file_out_conv = cmdline.getValue(param_out_conv);
				}
			}
		}
		else {
			throw "unknown method";
		}
		fml->fm = &fm;
		fml->max_target = train.max_target;
		fml->min_target = train.min_target;

		if (cmdline.getValue(param_method).compare("bpr") || cmdline.getValue(param_method).compare("bpra")){
			//there is not a minimum or maximum target. Only the relative ranking is important.
			fml->max_target = std::numeric_limits<double>::max();
			fml->min_target = std::numeric_limits<double>::min();
		}

		fml->meta = &meta;
		if (! cmdline.getValue("task").compare("r") ) {
			fml->task = 0;
		} else if (! cmdline.getValue("task").compare("c") ) {
			fml->task = 1;
			for (uint i = 0; i < train.target.dim; i++) { if (train.target(i) <= 0.0) { train.target(i) = -1.0; } else {train.target(i) = 1.0; } }
			for (uint i = 0; i < test.target.dim; i++) { if (test.target(i) <= 0.0) { test.target(i) = -1.0; } else {test.target(i) = 1.0; } }
			if (validation != NULL) {
				for (uint i = 0; i < validation->target.dim; i++) { if (validation->target(i) <= 0.0) { validation->target(i) = -1.0; } else {validation->target(i) = 1.0; } }
			}
		} else {
			throw "unknown task";
		}

		// (4) init the logging
		RLog* rlog = NULL;
		if (cmdline.hasParameter(param_r_log)) {
			ofstream* out_rlog = NULL;
			std::string r_log_str = cmdline.getValue(param_r_log);
	 		out_rlog = new ofstream(r_log_str.c_str());
	 		if (! out_rlog->is_open())	{
	 			throw "Unable to open file " + r_log_str;
	 		}
	 		std::cout << "logging to " << r_log_str.c_str() << std::endl;
			rlog = new RLog(out_rlog);
	 	}

		fml->log = rlog;
		fml->init();
		if (! cmdline.getValue(param_method).compare("mcmc")) {
			// set the regularization; for als and mcmc this can be individual per group
			{
	 			vector<double> reg = cmdline.getDblValues(param_regular);
				assert((reg.size() == 0) || (reg.size() == 1) || (reg.size() == 3) || (reg.size() == (1+meta.num_attr_groups*2)));
				if (reg.size() == 0) {
					fm.reg0 = 0.0;
					fm.regw = 0.0;
					fm.regv = 0.0;
					((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
				} else if (reg.size() == 1) {
					fm.reg0 = reg[0];
					fm.regw = reg[0];
					fm.regv = reg[0];
					((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
				} else if (reg.size() == 3) {
					fm.reg0 = reg[0];
					fm.regw = reg[1];
					fm.regv = reg[2];
					((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
				} else {
					fm.reg0 = reg[0];
					fm.regw = 0.0;
					fm.regv = 0.0;
					int j = 1;
					for (uint g = 0; g < meta.num_attr_groups; g++) {
						((fm_learn_mcmc*)fml)->w_lambda(g) = reg[j];
						j++;
					}
					for (uint g = 0; g < meta.num_attr_groups; g++) {
						for (int f = 0; f < fm.num_factor; f++) {
							((fm_learn_mcmc*)fml)->v_lambda(g,f) = reg[j];
						}
 						j++;
					}
				}

			}
		} else {
			// set the regularization; for standard SGD, groups are not supported
			{
	 			vector<double> reg = cmdline.getDblValues(param_regular);
				assert((reg.size() == 0) || (reg.size() == 1) || (reg.size() == 3));
				if (reg.size() == 0) {
					fm.reg0 = 0.0;
					fm.regw = 0.0;
					fm.regv = 0.0;
				} else if (reg.size() == 1) {
					fm.reg0 = reg[0];
					fm.regw = reg[0];
					fm.regv = reg[0];
				} else {
					fm.reg0 = reg[0];
					fm.regw = reg[1];
					fm.regv = reg[2];
				}
			}
		}
		{
			fm_learn_sgd* fmlsgd= dynamic_cast<fm_learn_sgd*>(fml);
			if (fmlsgd) {
				// set the learning rates (individual per layer)
				{
		 			vector<double> lr = cmdline.getDblValues(param_learn_rate);
					assert((lr.size() == 1) || (lr.size() == 3));
					if (lr.size() == 1) {
						fmlsgd->learn_rate = lr[0];
						fmlsgd->learn_rates.init(lr[0]);
					} else {
						fmlsgd->learn_rate = 0;
						fmlsgd->learn_rates(0) = lr[0];
						fmlsgd->learn_rates(1) = lr[1];
						fmlsgd->learn_rates(2) = lr[2];
					}
				}
			}
		}
		if (rlog != NULL) {
			rlog->init();
		}

		if (cmdline.getValue(param_verbosity, 0) > 0) {
			fm.debug();
			fml->debug();
		}

		// () learn
		fml->learn(train, test);

		// () Prediction at the end  (not for mcmc and als)
		if (cmdline.getValue(param_method).compare("mcmc")) {
			std::cout << "Final\t" << "Train=" << fml->evaluate(train) << "\tTest=" << fml->evaluate(test) << std::endl;
		}

		//compute output ranked list for target ids
		if (cmdline.hasParameter(param_out_ranked_list_dir)){
			std::cout << "Compute and store output ranked list for target ids...\t" << std::endl;
			int TOP_K = cmdline.getValue(param_top_k, 100);
			string out_ranked_list_dir = cmdline.getValue(param_out_ranked_list_dir);
			vector<int> target_ids;
			if (cmdline.hasParameter(param_list_id_output)){
				target_ids = cmdline.getIntValues(param_list_id_output);
			}
			else{
				int n_fixed_cases = train.relation(FIXED_BLOCK).data->num_cases;
				target_ids.resize(n_fixed_cases);
				for (int i = 0; i<n_fixed_cases; i++){
					target_ids[i] = i;
				}
			}
			Recommendation rec;
			rec.fm = &fm;
			rec.target_ids = target_ids;
			rec.MAX_THREADS = NUM_THREADS;
			rec.TOP_K = TOP_K;
			rec.OUT_DIR = out_ranked_list_dir;
			rec.evaluate(train);
		}

		// () Save prediction
		if (cmdline.hasParameter(param_out)) {
			DVector<double> pred;
			pred.setSize(test.num_cases);
			if (cmdline.getValue(param_method).compare("bpr") && cmdline.hasParameter(param_relation)) { //BLOCK BPR
				if (NUM_THREADS>1){
					//PARALLEL BLOCK BPR
					((fm_learn_sgd_element_BPR_blocks_parallel*)fml)->predict(test, pred);
				}
				else{
					//BLOCK BPR
					((fm_learn_sgd_element_BPR_blocks*)fml)->predict(test, pred);
				}

			}
			else if (cmdline.getValue(param_method).compare("bpra")){ //BLOCK BPR with adaptive regularization
				if (NUM_THREADS>1){
					//PARALLEL BLOCK BPRA
					((fm_learn_sgd_element_BPR_blocks_adapt_reg_parallel*)fml)->predict(test, pred);
				}
				else{
					//BLOCK BPRA
					((fm_learn_sgd_element_BPR_blocks_adapt_reg*)fml)->predict(test, pred);
				}
			}
			else{
				fml->predict(test, pred);
			}
			pred.save(cmdline.getValue(param_out));
		}

		// () write down the latent vectors (unary and pairwise interactions)
		if (cmdline.hasParameter(param_out_vectors)) {
			fm.printOutState(cmdline.getValue(param_out_vectors));
		}

	} catch (std::string &e) {
		std::cerr << std::endl << "ERROR: " << e << std::endl;
	} catch (char const* &e) {
		std::cerr << std::endl << "ERROR: " << e << std::endl;
	}
}
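The MCMC branch above accepts a -regular list of size 0, 1, 3, or 1 + 2*G, where G is the number of attribute groups: the bias term r0 first, then one 1-way lambda per group, then one 2-way lambda per group. A short standalone sketch of that unpacking, mirroring the j-indexed loop above (hypothetical helper, not libFM code):

#include <cassert>
#include <vector>

struct GroupReg {
	double reg0;
	std::vector<double> w_lambda; // one 1-way lambda per group
	std::vector<double> v_lambda; // one 2-way lambda per group (shared across factors)
};

// Unpack a flat list of size 1 + 2*G, matching the index arithmetic above.
static GroupReg unpack(const std::vector<double>& reg, unsigned num_groups) {
	assert(reg.size() == 1 + 2 * num_groups);
	GroupReg r;
	r.reg0 = reg[0];
	r.w_lambda.assign(reg.begin() + 1, reg.begin() + 1 + num_groups);
	r.v_lambda.assign(reg.begin() + 1 + num_groups, reg.end());
	return r;
}

int main() {
	GroupReg r = unpack({0.5, 1.0, 2.0, 3.0, 4.0}, 2); // G = 2
	assert(r.reg0 == 0.5 && r.w_lambda[1] == 2.0 && r.v_lambda[0] == 3.0);
	return 0;
}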
Example #4
File: fm.cpp Project: farseev/mobility
int fm_train_test(Value& config, FMFeature trainData, FMFeature testData, FMTarget& prediction)
{
	try
	{
		// (1) Load the data
		std::cout << "Loading train...\t" << std::endl;

		bool has_x = (string(config["method"].GetString()) != "mcmc"); // no original data for mcmc
		bool has_xt = (string(config["method"].GetString()) != "sgd" // no transpose data for sgd, sgda
				&& string(config["method"].GetString()) != "sgda");

		Data train(0, has_x, has_xt);

		FMMemory trainMem, testMem;

		CreateData(train, trainData, trainMem);
		if (has_xt)
			CreateDataT(train, trainMem);

		std::cout << "Loading test... \t" << std::endl;
		Data test(0, has_x, has_xt); // no transpose data for sgd, sgda

		CreateData(test, testData, testMem);
		if (has_xt)
			CreateDataT(test, testMem);

		uint num_all_attribute = train.num_feature;

		DataMetaInfo meta(num_all_attribute);
		//meta.num_attr_per_group.setSize(meta.num_attr_groups);
		//meta.num_attr_per_group.init(0);
		meta.num_relations = train.relation.dim;

		// (2) Setup the factorization machine
		fm_model fm;
		{
			fm.num_attribute = num_all_attribute;
			fm.init_stdev = config["init_stdev"].GetDouble();
			// set the number of dimensions in the factorization
			{
				const Value& dimValue = config["dim"];
				vector<int> dim;
				for (int i = 0; i < dimValue.Size(); i++)
					dim.push_back(dimValue[i].GetInt());
				assert(dim.size() == 3);
				fm.k0 = dim[0] != 0;
				fm.k1 = dim[1] != 0;
				fm.num_factor = dim[2];
			}
			fm.init();

		}

		// (3) Setup the learning method:
		fm_learn* fml;
		if (string(config["method"].GetString()) == "sgd")
		{
			fml = new fm_learn_sgd_element();
			((fm_learn_sgd*) fml)->num_iter = config["iter"].GetInt();

		}
		else if (string(config["method"].GetString()) == "mcmc")
		{
			fm.w.init_normal(fm.init_mean, fm.init_stdev);
			fml = new fm_learn_mcmc_simultaneous();
			fml->validation = NULL;
			((fm_learn_mcmc*) fml)->num_iter = config["iter"].GetInt();
			((fm_learn_mcmc*) fml)->num_eval_cases = test.num_cases;
			((fm_learn_mcmc*) fml)->do_sample = true;
			((fm_learn_mcmc*) fml)->do_multilevel = true;
		}
		else
			throw "unknown method";

		fml->fm = &fm;
		fml->max_target = train.max_target;
		fml->min_target = train.min_target;
		fml->meta = &meta;

		if (string(config["task"].GetString()) == "regression")
		{
			fml->task = 0;
		}
		else if (string(config["task"].GetString()) == "classification")
		{
			fml->task = 1;
			for (uint i = 0; i < train.target.dim; i++)
			{
				if (train.target(i) <= 0.0)
					train.target(i) = -1.0;
				else
					train.target(i) = 1.0;
			}
			for (uint i = 0; i < test.target.dim; i++)
			{
				if (test.target(i) <= 0.0)
					test.target(i) = -1.0;
				else
					test.target(i) = 1.0;
			}
		}
		else
			throw "unknown task";
	 	
		fml->log = NULL;
		fml->init();

		if (string(config["method"].GetString()) == "mcmc")
		{
			// set the regularization; for als and mcmc this can be individual per group
			{
				const Value& regValue = config["regular"];
				vector<double> reg;
				for (int i = 0; i < regValue.Size(); i++)
					reg.push_back(regValue[i].GetDouble());
				assert(
						(reg.size() == 0) || (reg.size() == 1)
								|| (reg.size() == 3)
								|| (reg.size() == (1 + meta.num_attr_groups * 2)));
				if (reg.size() == 0)
				{
					fm.reg0 = 0.0;
					fm.regw = 0.0;
					fm.regv = 0.0;
					((fm_learn_mcmc*) fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*) fml)->v_lambda.init(fm.regv);
				}
				else if (reg.size() == 1)
				{
					fm.reg0 = reg[0];
					fm.regw = reg[0];
					fm.regv = reg[0];
					((fm_learn_mcmc*) fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*) fml)->v_lambda.init(fm.regv);
				}
				else if (reg.size() == 3)
				{
					fm.reg0 = reg[0];
					fm.regw = reg[1];
					fm.regv = reg[2];
					((fm_learn_mcmc*) fml)->w_lambda.init(fm.regw);
					((fm_learn_mcmc*) fml)->v_lambda.init(fm.regv);
				}
				else
				{
					fm.reg0 = reg[0];
					fm.regw = 0.0;
					fm.regv = 0.0;
					int j = 1;
					for (uint g = 0; g < meta.num_attr_groups; g++)
					{
						((fm_learn_mcmc*) fml)->w_lambda(g) = reg[j];
						j++;
					}
					for (uint g = 0; g < meta.num_attr_groups; g++)
					{
						for (int f = 0; f < fm.num_factor; f++)
						{
							((fm_learn_mcmc*) fml)->v_lambda(g, f) = reg[j];
						}
						j++;
					}
				}

			}
		}
		else
		{
			// set the regularization; for standard SGD, groups are not supported
			{
				const Value& regValue = config["regular"];
				vector<double> reg;
				for (int i = 0; i < regValue.Size(); i++)
					reg.push_back(regValue[i].GetDouble());
				assert(
						(reg.size() == 0) || (reg.size() == 1)
								|| (reg.size() == 3));
				if (reg.size() == 0)
				{
					fm.reg0 = 0.0;
					fm.regw = 0.0;
					fm.regv = 0.0;
				}
				else if (reg.size() == 1)
				{
					fm.reg0 = reg[0];
					fm.regw = reg[0];
					fm.regv = reg[0];
				}
				else
				{
					fm.reg0 = reg[0];
					fm.regw = reg[1];
					fm.regv = reg[2];
				}
			}
		}
		{
			fm_learn_sgd* fmlsgd = dynamic_cast<fm_learn_sgd*>(fml);
			if (fmlsgd)
			{
				// set the learning rates (individual per layer)
				{
					const Value& lrValue = config["learn_rate"];
					vector<double> lr;
					for (int i = 0; i < lrValue.Size(); i++)
						lr.push_back(lrValue[i].GetDouble());
					assert((lr.size() == 1) || (lr.size() == 3));
					if (lr.size() == 1)
					{
						fmlsgd->learn_rate = lr[0];
						fmlsgd->learn_rates.init(lr[0]);
					}
					else
					{
						fmlsgd->learn_rate = 0;
						fmlsgd->learn_rates(0) = lr[0];
						fmlsgd->learn_rates(1) = lr[1];
						fmlsgd->learn_rates(2) = lr[2];
					}
				}
			}
		}

		// () learn
		fml->learn(train, test);

		// () Prediction at the end  (not for mcmc and als)
		if (string(config["method"].GetString()) != "mcmc")
		{
			std::cout << "Final\t" << "Train=" << fml->evaluate(train)
					<< "\tTest=" << fml->evaluate(test) << std::endl;
		}

		// () Save prediction
		DVector<double> pred;
		pred.setSize(test.num_cases);
		fml->predict(test, pred);
		for (int i = 0; i < test.num_cases; i++)
			prediction.push_back(pred(i));
		if (config["pred_output"].GetBool())
			pred.save(config["pred"].GetString());

		if (string(config["method"].GetString()) == "sgd")
		{
			fm_learn_sgd_element* fml_sgd = dynamic_cast<fm_learn_sgd_element*>(fml);
			delete fml_sgd;

		}
		else if (string(config["method"].GetString()) == "mcmc")
		{
			fm_learn_mcmc_simultaneous* fml_mcmc = dynamic_cast<fm_learn_mcmc_simultaneous*>(fml);
			delete fml_mcmc;
		}

	} catch (std::string &e)
	{
		std::cerr << std::endl << "ERROR: " << e << std::endl;
	} catch (char const* &e)
	{
		std::cerr << std::endl << "ERROR: " << e << std::endl;
	}

	return 0;
}
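// For reference, a config document that would satisfy the Get*() accesses in
// fm_train_test above (keys are taken from the code; the concrete values are
// illustrative assumptions only):
const char* kExampleConfig = R"({
	"method":      "mcmc",
	"task":        "regression",
	"init_stdev":  0.1,
	"dim":         [1, 1, 8],
	"iter":        100,
	"regular":     [],
	"learn_rate":  [0.1],
	"pred_output": false,
	"pred":        "pred.txt"
})";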
int executeFM(string train_filename, string test_filename, int k, int learn_iter, int ix) {
	std::ostringstream stringStream;
	string stats_filename, results_filename;
	string k_string;
	string aux_str;
	// Make k part of the string
	stringStream.str("");
	stringStream << "k" << k;
	/* Make the replacement for stats filepath*/
	stats_filename = train_filename;
	// Replace dir Data/ -> Results/
	aux_str = DATA_DIR;
	stats_filename = stats_filename.replace(stats_filename.find(aux_str.c_str(), 0), aux_str.length(), RESULTS_DIR);
	// Replace base.lib extension for csv
	aux_str = "base.libfm";
	if (stats_filename.find(aux_str) == string::npos) { // find() returns the unsigned string::npos on failure; a "< 0" test can never fire
		aux_str = "base";
	}
	stringStream << STATS_EXT;
	k_string = stringStream.str();
	stats_filename = stats_filename.replace(stats_filename.find(aux_str.c_str(), 0), aux_str.length(), k_string);
	std::cout << "Stats file path is " << stats_filename << endl;
	/* Make the replacement for results filepath*/
	results_filename = stats_filename;
	aux_str = STATS_EXT;
	results_filename = results_filename.replace(results_filename.find(aux_str.c_str(), 0), aux_str.length(), RESULTS_EXT);
	std::cout << "Results file path is " << stats_filename << endl;
	srand(time(NULL));
	try {
		stringStream.str("");
		std::cout << "Loading train dataset...\t" << endl;
		Data train(0, 0, 1);
		train.load(train_filename);
		train.debug();
		Data test(0, 0, 1);
		std::cout << "Loading test dataset... \t" << endl;
		test.load(test_filename);
		test.debug();
		Data* validation = NULL;
		DVector<RelationData*> relation;
		// (1.2) Load relational data
		{
			vector<string> rel = {};
			// std::cout << "#relations: " << rel.size() << endl;
			relation.setSize(rel.size());
			train.relation.setSize(rel.size());
			test.relation.setSize(rel.size());
			for (uint i = 0; i < rel.size(); i++) {
				relation(i) = new RelationData(0, true, false);
				relation(i)->load(rel[i]);
				train.relation(i).data = relation(i);
				test.relation(i).data = relation(i);
				train.relation(i).load(rel[i] + ".train", train.num_cases);
				test.relation(i).load(rel[i] + ".test", test.num_cases);
			}
		}
		// std::cout << "Loading meta data...\t" << endl;
		// (main table)
		uint num_all_attribute = max(train.num_feature, test.num_feature);
		if (validation != NULL) {
			num_all_attribute = max(num_all_attribute, (uint)validation->num_feature);
		}
		DataMetaInfo meta_main(num_all_attribute);
		// meta_main.loadGroupsFromFile(cmdline.getValue(param_meta_file));

		// build the joined meta table
		for (uint r = 0; r < train.relation.dim; r++) {
			train.relation(r).data->attr_offset = num_all_attribute;
			num_all_attribute += train.relation(r).data->num_feature;
		}
		DataMetaInfo meta(num_all_attribute);
		{
			meta.num_attr_groups = meta_main.num_attr_groups;
			for (uint r = 0; r < relation.dim; r++) {
				meta.num_attr_groups += relation(r)->meta->num_attr_groups;
			}
			meta.num_attr_per_group.setSize(meta.num_attr_groups);
			meta.num_attr_per_group.init(0);
			for (uint i = 0; i < meta_main.attr_group.dim; i++) {
				meta.attr_group(i) = meta_main.attr_group(i);
				meta.num_attr_per_group(meta.attr_group(i))++;
			}

			uint attr_cntr = meta_main.attr_group.dim;
			uint attr_group_cntr = meta_main.num_attr_groups;
			for (uint r = 0; r < relation.dim; r++) {
				for (uint i = 0; i < relation(r)->meta->attr_group.dim; i++) {
					meta.attr_group(i + attr_cntr) = attr_group_cntr + relation(r)->meta->attr_group(i);
					meta.num_attr_per_group(attr_group_cntr + relation(r)->meta->attr_group(i))++;
				}
				attr_cntr += relation(r)->meta->attr_group.dim;
				attr_group_cntr += relation(r)->meta->num_attr_groups;
			}
			meta.debug();

		}
		meta.num_relations = train.relation.dim;
		// (2) Setup the factorization machine
		fm_model fm;
		{
			fm.num_attribute = num_all_attribute;
			fm.init_stdev = 0.1;
			// set the number of dimensions in the factorization
			{
				vector<int> dim = { 1, 1, k };
				assert(dim.size() == 3);
				fm.k0 = dim[0] != 0;
				fm.k1 = dim[1] != 0;
				fm.num_factor = dim[2];
			}
			fm.init();
		}
		// Setup the learning method:
		fm_learn* fml;
		fm.w.init_normal(fm.init_mean, fm.init_stdev);
		fml = new fm_learn_mcmc_simultaneous();
		fml->validation = validation;
		((fm_learn_mcmc*)fml)->num_iter = learn_iter;
		((fm_learn_mcmc*)fml)->num_eval_cases = test.num_cases;
		((fm_learn_mcmc*)fml)->do_sample = true;
		((fm_learn_mcmc*)fml)->do_multilevel = true;
		fml->fm = &fm;
		fml->max_target = train.max_target;
		fml->min_target = train.min_target;
		fml->task = 0;
		fml->meta = &meta;
		// std::cout << "Opening output file" << endl;
		RLog* rlog = NULL;
		ofstream* out_rlog = NULL;
		out_rlog = new ofstream(stats_filename);
		if (!out_rlog->is_open())	{
			throw "Unable to open file " + stats_filename;
		}
		// std::cout << "logging to " << r_log_str.c_str() << endl;
		rlog = new RLog(out_rlog);
		// 
		fml->log = rlog;
		fml->init();
		// set the regularization; for als and mcmc this can be individual per group
		vector<double> reg = {};
		assert((reg.size() == 0) || (reg.size() == 1) || (reg.size() == 3) || (reg.size() == (1 + meta.num_attr_groups * 2)));
		if (reg.size() == 0) {
			fm.reg0 = 0.0;
			fm.regw = 0.0;
			fm.regv = 0.0;
			((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
			((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
		}
		else if (reg.size() == 1) {
			fm.reg0 = reg[0];
			fm.regw = reg[0];
			fm.regv = reg[0];
			((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
			((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
		}
		else if (reg.size() == 3) {
			fm.reg0 = reg[0];
			fm.regw = reg[1];
			fm.regv = reg[2];
			((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
			((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
		}
		else {
			fm.reg0 = reg[0];
			fm.regw = 0.0;
			fm.regv = 0.0;
			int j = 1;
			for (uint g = 0; g < meta.num_attr_groups; g++) {
				((fm_learn_mcmc*)fml)->w_lambda(g) = reg[j];
				j++;
			}
			for (uint g = 0; g < meta.num_attr_groups; g++) {
				for (int f = 0; f < fm.num_factor; f++) {
					((fm_learn_mcmc*)fml)->v_lambda(g, f) = reg[j];
				}
				j++;
			}
		}
		if (rlog != NULL) {
			rlog->init();
		}
		fm.debug();
		fml->debug();
		// () learn		
		fml->learn(train, test);
		std::cout << "Save prediction" << endl;
		DVector<double> pred;
		pred.setSize(test.num_cases);
		fml->predict(test, pred);
		pred.save(results_filename);
	}
	catch (string &e) {
		cerr << endl << "ERROR: " << e << endl;
	}
	catch (char const* &e) {
		cerr << endl << "ERROR: " << e << endl;
	}
	
	return 0;
}
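The filename rewriting in executeFM depends on std::string::find, which returns the unsigned sentinel std::string::npos when the substring is absent, so the correct check is a comparison against npos, never "< 0". A minimal sketch of the safe pattern (an illustration, not project code):

#include <cassert>
#include <string>

// Replace the first occurrence of `from` in s with `to`; return false if absent.
static bool replace_first(std::string& s, const std::string& from, const std::string& to) {
	std::string::size_type pos = s.find(from);
	if (pos == std::string::npos) return false; // find() never returns a negative value
	s.replace(pos, from.length(), to);
	return true;
}

int main() {
	std::string path = "Data/ml.base.libfm";
	assert(replace_first(path, "Data/", "Results/"));
	assert(replace_first(path, "base.libfm", "k8.csv"));
	assert(path == "Results/ml.k8.csv");
	return 0;
}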