Example #1
double NeuralNet::trainNet(const std::vector<double>& data, const std::vector<double>& trainingOutput, const unsigned int outType)
{

  const int outputLayer = static_cast<int>( m_layers.size() );
  
  std::vector<double> output, error, delta, prevOut;
  double cost = 0.0;
  
  // run net forward
  output = runNet( data );
  error = computeError( output, trainingOutput );
  switch( outType )
  {
    case SCALAR:
      cost = computeMSE( error );
      break;
    case PROB:
      cost = logloss( output, trainingOutput );
      break;
    default:
      break; // unknown output type: cost stays 0.0
  }

  // propagate error backward through layers
  for(int i=outputLayer; i>0; i--)
  {

    // the output layer seeds the gradient with the raw error;
    // hidden layers back-propagate it through the weights of the layer above
    if( i == outputLayer )
      delta = error;
    else
      delta = m_layers[i-1]->computeDeltas( error, m_layers[i]->retrieveWeights() );

    // inputs seen by this layer: the previous layer's outputs,
    // or the raw data for the first layer
    if( i > 1 )
      prevOut = m_layers[i-2]->retrieveOutputs();
    else
      prevOut = data;

    m_layers[i-1]->updateWeights( prevOut, delta, m_learningRate, m_momentum, m_weightDecay );
    error = delta;
  
  }
  
  return cost;

}
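
For context, a minimal sketch of how trainNet might be driven from an epoch loop. Only trainNet's signature and the SCALAR output type come from the example above; the driver function, dataset names, and NeuralNet construction details are assumptions.

// Hypothetical driver loop for trainNet. Assumes a NeuralNet class exposing
// the trainNet(data, target, outType) API and SCALAR constant shown above.
#include <cstddef>
#include <iostream>
#include <vector>

void trainForEpochs(NeuralNet& net,
                    const std::vector<std::vector<double>>& inputs,
                    const std::vector<std::vector<double>>& targets,
                    unsigned int epochs)
{
  for (unsigned int e = 0; e < epochs; ++e)
  {
    double totalCost = 0.0;
    // one forward/backward pass per sample, accumulating the reported cost
    for (std::size_t s = 0; s < inputs.size(); ++s)
      totalCost += net.trainNet(inputs[s], targets[s], SCALAR);
    std::cout << "epoch " << e
              << " mean cost " << totalCost / inputs.size() << "\n";
  }
}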
Example #2
void bestFit()
{
	initHF();

	while (1)
	{
		// Read in the samples one by one; for each sample, simultaneously
		// compute the incremental inner product across all the hF polynomials.
		RxAndComputeInnerProducts();

		// At this point we have the projections; compute the MSE of each
		// one, which selects the best sigma index (SI).
		computeMSE();

		// Report the best SI...
		double SI = (double)best_sigma_index;
		write_float64("out0_data", SI);

		// ...followed by the six projections at that index.
		write_float64("out1_data", dotP0[best_sigma_index]);
		write_float64("out1_data", dotP1[best_sigma_index]);
		write_float64("out1_data", dotP2[best_sigma_index]);
		write_float64("out1_data", dotP3[best_sigma_index]);
		write_float64("out1_data", dotP4[best_sigma_index]);
		write_float64("out1_data", dotP5[best_sigma_index]);

		initFit();

	}
}
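
The example never shows how computeMSE arrives at best_sigma_index. A minimal sketch of that selection step, assuming each candidate's error has already been accumulated into a per-index array (NUM_SIGMAS and mse are hypothetical names; best_sigma_index is the global consumed by bestFit above):

#define NUM_SIGMAS 16          /* hypothetical number of candidate sigmas */

double mse[NUM_SIGMAS];        /* assumed per-candidate accumulated MSE */
int best_sigma_index;          /* global read by bestFit() above */

/* Pick the sigma index whose projection gives the smallest MSE. */
void computeMSE(void)
{
	best_sigma_index = 0;
	for (int k = 1; k < NUM_SIGMAS; k++)
	{
		if (mse[k] < mse[best_sigma_index])
			best_sigma_index = k;
	}
}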
Example #3
void runPmfBatchGradientDescentOMP(vector<users> &allUsers,
		vector<business> &allBusiness) {

	/* Switch off the dynamic thread setting. */
	omp_set_dynamic(0);

	/* Set the number of threads. */
	omp_set_num_threads(NO_OF_TRIALS);

	/* Output file object. */
	ofstream fout;

	/* Input file object. */
	ifstream fin;

#if !FORCE_INPUT
	editInputBatchTextForGradientDescent();
#endif

	/* Open input batch file mentioning different models. */
	fin.open(BATCH_INPUT_TEXT);

	/* Read number of models. */
	unsigned int n;
	fin >> n;

	/* Read the model parameters. */
	unsigned int *latentSpace = new unsigned int[n];
	unsigned int *maxIterations = new unsigned int[n];
	double *lambdaU = new double[n];
	double *lambdaV = new double[n];

	for (unsigned int i = 0; i < n; i++) {
		fin >> latentSpace[i];
		fin >> maxIterations[i];
		fin >> lambdaU[i];
		fin >> lambdaV[i];
	}

	fin.close();

	/* Log file name. */
	string logFileName;

	/* Get the log file name. */
	logFileName = getLogFileName(LOG_BATCH_RESULTS, 0, 0);

	/* Open the log file. */
	fout.open(logFileName.c_str());

	/* Now run each of these models and test them against the validation and
	 * test data sets. */
	for (unsigned int i = 0; i < n; i++) {

		/* Declare the model. */
		collaborativeFiltering collabFilteringModel[NO_OF_TRIALS];

		/* Initialize the root mean square errors to 0.*/
		double rmseTraining[NO_OF_TRIALS] = { 0.0 };
		double rmseValidation[NO_OF_TRIALS] = { 0.0 };
		double rmseTest[NO_OF_TRIALS] = { 0.0 };
		double rmsTrainError = 0.0;
		double rmsValidationError = 0.0;
		double rmsTestError = 0.0;

		/* Train the model a fixed number of times (one trial per thread),
		 * accumulate the error results, and log their averages. */

		/* Announce the model configuration being run. */
		cout << "Running PMF Algorithm with K = " << latentSpace[i] << " for "
				<< maxIterations[i] << " iterations, lambda U = " << lambdaU[i]
				<< ", lambda V = " << lambdaV[i] << endl << endl;

		collaborativeFiltering collabFilteringTest;
		/* Initialize a throwaway model once, only to obtain a consistent
		 * train/test split shared by all trials of this model. */
		initCollabFilteringModel(collabFilteringTest, allUsers, allBusiness,
				latentSpace[i], maxIterations[i], lambdaU[i], lambdaV[i], true);

		vector<review> testReviews = collabFilteringTest.testReviews;
		vector<users> trainUsers = collabFilteringTest.trainUsers;
		vector<business> trainBusiness = collabFilteringTest.trainBusiness;

		deinitCollabFilteringModel(collabFilteringTest);

#pragma omp parallel
		{

			/* Get the thread ID. */
			unsigned int j = omp_get_thread_num();

			/* Initialize the model. */
			collabFilteringModel[j].testReviews = testReviews;
			initCollabFilteringModel(collabFilteringModel[j], trainUsers,
					trainBusiness, latentSpace[i], maxIterations[i], lambdaU[i],
					lambdaV[i], false);

			/* Train the model. */
			probablisticMatrixFactorizationGradientDescent(
					collabFilteringModel[j]);

#if LOG_FEATURES
			cout << "Logging the computed features." << endl;

			/* Save the estimated user and business data. */
			logUserBusinessFeatures(collabFilteringModel[j]);

			cout << "Validating the computed features." << endl;

			/* Validate the computed features with existing reviews on training
			 * data. */
			validateAndLogReviews(collabFilteringModel[j], TRAINING_DATA);

			/* Validate the computed features with existing reviews on
			 * validation data. */
			validateAndLogReviews(collabFilteringModel[j], VALIDATION_DATA);

			/* Validate the computed features with existing reviews on testing
			 * data. */
			validateAndLogReviews(collabFilteringModel[j], TESTING_DATA);
#endif

#if LOG_MSE
			cout << "Logging the mean square error after every iteration."
			<< endl;

			/* Log the mean square error. */
			logMsePerIteration(collabFilteringModel[j]);
#endif

			/* Compute the error on training data set. */
			rmseTraining[j] = computeMSE(collabFilteringModel[j],
					TRAINING_DATA);

			/* Compute the error on validation data set. */
			rmseValidation[j] = computeMSE(collabFilteringModel[j],
					VALIDATION_DATA);

			/* Compute the error on testing data set. */
			rmseTest[j] = computeMSE(collabFilteringModel[j], TESTING_DATA);

			/* De-initialize the model. */
			deinitCollabFilteringModel(collabFilteringModel[j]);
		}

		for (unsigned int j = 0; j < NO_OF_TRIALS; j++) {
			rmsTestError += rmseTest[j];
			rmsValidationError += rmseValidation[j];
			rmsTrainError += rmseTraining[j];
		}

		/* Log the findings along with the model specifications. */
		fout << "Model " << setw(7) << i + 1 << ", ";
		fout << setw(13) << setprecision(5) << latentSpace[i] << ", ";
		fout << setw(13) << setprecision(5) << maxIterations[i] << ", ";
		fout << setw(13) << setprecision(5) << lambdaU[i] << ", ";
		fout << setw(13) << setprecision(5) << lambdaV[i] << ", ";
		fout << setw(13) << setprecision(5) << rmsTrainError / NO_OF_TRIALS
				<< ", ";
		fout << setw(13) << setprecision(5) << rmsValidationError / NO_OF_TRIALS
				<< ", ";
		fout << setw(13) << setprecision(5) << rmsTestError / NO_OF_TRIALS
				<< ", ";
		fout << endl;
	}

	/* Close the log file.*/
	fout.close();

	/* Switch on the dynamic thread setting. */
	omp_set_dynamic(1);

	/* new[] allocations must be released with delete[]. */
	delete[] latentSpace;
	delete[] maxIterations;
	delete[] lambdaU;
	delete[] lambdaV;
}
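
The layout of BATCH_INPUT_TEXT follows directly from the parsing loop above: a model count n on the first line, then one line per model giving the latent-space size, iteration cap, lambda U, and lambda V. An illustrative file (the values here are made up):

3
10 100 0.01 0.01
20 150 0.05 0.05
30 200 0.10 0.10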