Example #1
// Normal random initialization can accelerate convergence.
void MFBiasApp() {
  if (!FLAGS_flag_mfbias) {
    return;
  }
  SpMat train;
  SpMat test;
  std::pair<int, int> p = ReadData(FLAGS_train_path, &train);
  ReadData(FLAGS_test_path, &test);
  MF mf;
  RandomInit(p.second, p.first, FLAGS_k, &mf);
  SGD(FLAGS_it_num, FLAGS_eta, FLAGS_lambda, train, test, &mf);
}
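The comment above refers to drawing the initial latent factors from a normal distribution. The MF struct and the real RandomInit are not shown in this snippet, so the following is only a sketch under assumed names (factor matrices u/v and bias vectors bu/bi) that illustrates normal-random initialization:

#include <random>
#include <vector>

// Assumed layout of MF; the actual struct is not part of this example.
struct MF {
  std::vector<std::vector<float>> u;   // user factors, n_users x k
  std::vector<std::vector<float>> v;   // item factors, n_items x k
  std::vector<float> bu, bi;           // user and item biases
};

// Sketch: draw every factor from N(0, 0.1) so the initial values are small
// but not identical, which is the "normal random" idea in the comment above.
void RandomInit(int n_users, int n_items, int k, MF* mf) {
  std::mt19937 gen(42);
  std::normal_distribution<float> dist(0.0f, 0.1f);
  mf->u.assign(n_users, std::vector<float>(k));
  mf->v.assign(n_items, std::vector<float>(k));
  mf->bu.assign(n_users, 0.0f);
  mf->bi.assign(n_items, 0.0f);
  for (auto& row : mf->u) for (auto& x : row) x = dist(gen);
  for (auto& row : mf->v) for (auto& x : row) x = dist(gen);
}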
Example #2
void network::learning(const train_couple* training_data, int data_count, int batch_size, double eta, int epoch_number)
{
	train_couple* data_copy=new train_couple[data_count]; // copy to shuffle later so the original training_data is not affected
	for (int i=0; i<data_count; i++)
		data_copy[i]=training_data[i];

	int nbatches=data_count/batch_size;

	for (int i=0; i<epoch_number; i++) {
		shuffle(data_copy,data_count);

		for (int batch_number=0; batch_number<nbatches; batch_number++){
			train_couple* mini_batch=get_batch(data_copy,batch_size,batch_number);

			SGD(mini_batch,batch_size,eta,data_count);

			delete [] mini_batch;

		}
		cout<<"epoch # "<<i<<" has been finished\n";
	}

	delete [] data_copy;
}
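The loop above shuffles a copy of the data each epoch and hands contiguous slices to SGD. The get_batch helper is not shown in this snippet; a plausible sketch, assuming it is a member of network and simply copies the batch_number-th slice of batch_size samples into a newly allocated array (which is why learning delete[]s it afterwards):

// Sketch only: the real get_batch is not part of this example.
train_couple* network::get_batch(const train_couple* data, int batch_size, int batch_number)
{
	train_couple* batch=new train_couple[batch_size];
	int offset=batch_number*batch_size;      // start index of this mini-batch
	for (int i=0; i<batch_size; i++)
		batch[i]=data[offset+i];         // copy so the caller can free it independently
	return batch;
}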
Example #3
// Convenience overload: delegate to SGD with the default learning rate ALPHA.
fmat SGD(const fmat& X, const fmat& Y) {
    return SGD(X, Y, ALPHA);
}
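The three-argument SGD this overload forwards to is not shown here. Assuming fmat is Armadillo's float matrix type, a minimal sketch of such a routine for a linear least-squares model (one pass of per-sample gradient steps on a weight matrix) could look like this; the model and names are assumptions, not the library's actual implementation:

#include <armadillo>
using arma::fmat;

// Sketch only: one epoch of per-sample SGD for linear least squares.
// Each row of X is a sample, each row of Y the corresponding target.
fmat SGD(const fmat& X, const fmat& Y, float alpha) {
    fmat w(X.n_cols, Y.n_cols, arma::fill::zeros);   // weights to learn
    for (arma::uword i = 0; i < X.n_rows; ++i) {
        fmat xi = X.row(i);                          // 1 x d sample
        fmat err = xi * w - Y.row(i);                // prediction error
        w -= alpha * xi.t() * err;                   // gradient step
    }
    return w;
}

With a sketch like this in place, SGD(X, Y) trains with the compile-time default ALPHA, while callers can still pass an explicit learning rate.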