// Example no. 1
	void get_samples()
	{
		ASSERT(samples.size() == fetchers.size());
		for (size_t i = 0; i < samples.size(); ++i)
		{
			CDenseFeatures<float64_t> *ptr = (CDenseFeatures<float64_t>*)fetchers[i]->fetch(samples[i].ptr);
			ptr->get_feature_matrix().display_matrix();
		}
	}
/** Deep-autoencoder demo: generates Gaussian toy data, pre-trains and
 * fine-tunes a CDeepAutoencoder on it, then reports the average relative
 * reconstruction error.
 *
 * @return 0 always (including the out-of-memory early-exit path).
 */
int main(int, char*[])
{
	init_shogun_with_defaults();

#ifdef HAVE_LAPACK // for CDataGenerator::generate_gaussian()

	// initialize the random number generator with a fixed seed, for repeatability
	CMath::init_random(10);

	// Prepare the training data
	const int num_features = 20;
	const int num_classes = 4;
	const int num_examples_per_class = 20;

	SGMatrix<float64_t> X;
	try
	{
		X = CDataGenerator::generate_gaussians(
			num_examples_per_class,num_classes,num_features);
	}
	catch (const ShogunException& e) // catch by const reference, never by value
	{
		// out of memory — print the message as an *argument*, not as the
		// format string itself (it could contain '%' sequences)
		SG_SPRINT("%s", e.get_exception_string());
		return 0;
	}

	CDenseFeatures<float64_t>* features = new CDenseFeatures<float64_t>(X);

	// Create a deep autoencoder: 20 -> 10 -> 5 -> 10 -> 20
	CNeuralLayers* layers = new CNeuralLayers();
	layers
		->input(num_features)
		->rectified_linear(10)->rectified_linear(5)->rectified_linear(10)
		->linear(num_features);
	CDeepAutoencoder* ae = new CDeepAutoencoder(layers->done());

	// uncomment this line to enable info logging
	// ae->io->set_loglevel(MSG_INFO);

	// pre-train (layer-wise), with a tight convergence tolerance
	ae->pt_epsilon.set_const(1e-6);
	ae->pre_train(features);

	// fine-tune the whole network end-to-end
	ae->train(features);

	// reconstruct the data
	CDenseFeatures<float64_t>* reconstructions = ae->reconstruct(features);
	SGMatrix<float64_t> X_reconstructed = reconstructions->get_feature_matrix();

	// find the average relative difference between the data and the
	// reconstructions. NOTE(review): entries of X very close to zero would
	// blow up the per-element ratio — acceptable here because the Gaussian
	// clusters are centered away from the origin; verify if the generator
	// parameters change.
	float64_t avg_diff = 0;
	int32_t N = X.num_rows*X.num_cols;
	for (int32_t i=0; i<N; i++)
		avg_diff += CMath::abs(X[i]-X_reconstructed[i])/CMath::abs(X[i]);
	avg_diff /= N;

	// '%%' prints a literal percent sign; a lone '%' is a malformed specifier
	SG_SINFO("Average difference = %f %%\n", avg_diff*100);

	// Clean up (Shogun reference counting)
	SG_UNREF(ae);
	SG_UNREF(layers);
	SG_UNREF(features);
	SG_UNREF(reconstructions);

#endif

	exit_shogun();
	return 0;
}