Example #1
void testMNISTLoading(){
    MNISTDataset mnist;
    mnist.loadData();                    // load the MNIST images and labels
    MLP mlp("result/MLPModel.dat");      // construct an MLP from the serialized model file

    TrainModel mlpModel(mlp);
    printf("validation error : %.8lf%%\n", 100.0 * mlpModel.getValidError(&mnist, 20));
}
Example #2
int main(int argc, char const *argv[])
{
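	// layers and truthTable are globals defined elsewhere in the original file;
	// layers holds the per-layer neuron counts of the 2-2-1 network.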
	layers[0] = 2;
	layers[1] = 2;
	layers[2] = 1;
	Network mlp(layers,sigmoid);

	unsigned epochs = 20000;
	double learning = 0.8;
	double momentum = 0.3;

	if (argc > 1) {
		epochs = strtoul(argv[1], 0, 0);
		if (argc > 2)
			learning = atof(argv[2]);
		if (argc > 3)
			momentum = atof(argv[3]);
	}

	array<double,2> input;
	input[0] = 0;
	input[1] = 0;
	truthTable.add(input,0);
	input[1] = 1;
	truthTable.add(input,1);
	input[0] = 1;
	input[1] = 0;
	truthTable.add(input,1);
	input[1] = 1;
	truthTable.add(input,0);
	
	time_t start,end;
	time(&start);
	mlp.learn(truthTable,learning,momentum,epochs);
	time(&end);
	std::cout << difftime(end,start) << "\n";

	for (unsigned i = 1; i < 3; i++)
		for (unsigned j = 0; j < layers[i]; j++)
			for (unsigned k = 0; k < layers[i-1]; k++)
				std::cout << "w"<<i<<"("<<j<<","<<k<<"): "<< mlp[i][j][k].weight << "\n";
	std::cout << "\n";
	for (unsigned i = 0; i < truthTable.instances(); i++) {
		mlp = truthTable.input(i).toPointer();
		mlp.update();
		std::cout << "\t" << truthTable.honestOutput(i) << " -- " << mlp[2]()[0];
		std::cout << " {" << (truthTable.honestOutput(i) - mlp[2]()[0]) * (truthTable.honestOutput(i) - mlp[2]()[0]) << "}\n";
	}
	return 0;
}
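Run with up to three positional arguments (epoch count, learning rate, momentum; e.g. "a.out 50000 0.5 0.2"), the program trains a 2-2-1 network on the XOR truth table, prints the wall-clock training time in seconds, dumps every learned weight, and finally prints the expected versus computed output with the squared error for each of the four input patterns.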
Example #3
void MLearn::runML(){
	Mat trainingData(numTrainingPoints, 2, CV_32FC1);
	Mat testData(numTestPoints, 2, CV_32FC1);
	randu(trainingData, 0, 1);
	randu(testData, 0, 1);
	Mat trainingClasses = labelData(trainingData, eq);
	Mat testClasses = labelData(testData, eq);
	plot_binary(trainingData, trainingClasses, "Training Data");
	plot_binary(testData, testClasses, "Test Data");
	svm(trainingData, trainingClasses, testData, testClasses);
	mlp(trainingData, trainingClasses, testData, testClasses);
	knn(trainingData, trainingClasses, testData, testClasses, 3);
	bayes(trainingData, trainingClasses, testData, testClasses);
	decisiontree(trainingData, trainingClasses, testData, testClasses);
	waitKey();
}
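The helpers labelData, plot_binary and the five classifier wrappers (svm, mlp, knn, bayes, decisiontree) live elsewhere in the original file. As a rough sketch of what labelData could look like, assuming each row of the input Mat is a 2-D point in [0,1]^2 and the two classes are encoded as +1/-1 (both assumptions, as is the sine-shaped decision boundary):

#include <opencv2/core/core.hpp>
#include <cmath>

// Hypothetical reimplementation: assign each (x, y) point the class +1 or -1
// depending on which side of a decision curve it falls. The curve and the
// 'equation' selector are illustrative, not the original code.
cv::Mat labelData(const cv::Mat& points, int equation)
{
    cv::Mat labels(points.rows, 1, CV_32FC1);
    for (int i = 0; i < points.rows; i++) {
        float x = points.at<float>(i, 0);
        float y = points.at<float>(i, 1);
        float boundary = 0.5f + 0.25f * std::sin(10.0f * x * equation);
        labels.at<float>(i, 0) = (y > boundary) ? 1.0f : -1.0f;
    }
    return labels;
}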
Example #4
void setError(
    ProviderVector &miVector,
    String &errorMessage,
    const String &realProviderName,
    const char *generic,
    const char *spec,
    const CMPIString *optMsg)
{
    PEG_METHOD_ENTER(TRC_CMPIPROVIDERINTERFACE, "CMPIProvider:setError()");
    if (errorMessage.size() > 0)
    {
        errorMessage.append("; ");
    }

    String MItype;
    if (miVector.genericMode)
    {
        MItype.append(generic);
    }
    else
    {
        MItype.append(realProviderName);
        MItype.append(spec);
    }

    if (optMsg && CMGetCharsPtr(optMsg,NULL))
    {
        MessageLoaderParms mlp(
            "ProviderManager.CMPI.CMPIProvider.MESSAGE_WAS",
            "$0, message was: $1",
            MItype,
            CMGetCharsPtr(optMsg,NULL));

        errorMessage.append(MessageLoader::getMessage(mlp));
    }
    else
    {
        errorMessage.append(MItype);
    }
    PEG_METHOD_EXIT();
}
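Note the message construction in the localized branch: MessageLoader::getMessage substitutes MItype and the CMPI-supplied text for the $0 and $1 placeholders of the default string, falling back to the built-in English default when no localized message bundle is available.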
Example #5
int main(int argc, char* argv[]) {
	bool verbose;
	bool interior_point;
	double nu;
	bool weight_sharing;
	bool force;
	std::string train_filename;
	std::string output_filename;
	std::string solver;
	std::string mpsfile;

	// Command line options
	po::options_description generic("Generic Options");
	generic.add_options()
		("help", "Produce help message")
		("verbose", "Verbose output")
		;

	po::options_description input_options("Input/Output Options");
	input_options.add_options()
		("train", po::value<std::string>
			(&train_filename)->default_value("training.txt"),
			"Training file in \"label s0-m0.txt s0-m1.txt ...\" format, "
			"one sample per row.")
		("output", po::value<std::string>
			(&output_filename)->default_value("output.txt"),
			"File to write weight matrix to.  If \"--weight_sharing 1\" is "
			"used, this is a single line containing the alpha vector.  If "
			"no weight sharing is used, it is a matrix with number-of-classes "
			"rows and number-of-weak-learners columns.")
		("force", po::value<bool>(&force)->default_value(false),
			"Force overwriting the output file.  Otherwise, if the "
			"output file already exists, the program is aborted immediately.")
		("writemps", po::value<std::string>(&mpsfile)->default_value(""),
			"Write linear programming problem as MPS file.")
		;

	po::options_description lpboost_options("LPBoost Options");
	lpboost_options.add_options()
		("nu", po::value<double>(&nu)->default_value(0.1),
			"nu-parameter for 2-class LPBoost.  A larger value "
			"indicates stronger regularization")
		("weight_sharing", po::value<bool>(&weight_sharing)->default_value(true),
			"Share classifier weights among all classes.")
		("interior_point",
			po::value<bool>(&interior_point)->default_value(true),
			"Use interior point (true) or simplex method (false) to "
			"solve the LPBoost master problem")
		("solver", po::value<std::string>(&solver)->default_value("clp"),
			"LP solver to use.  One of \"clp\" or \"mosek\".")
		;

	po::options_description all_options;
	all_options.add(generic).add(input_options).add(lpboost_options);
	po::variables_map vm;
	po::store(po::command_line_parser(argc, argv).options(all_options).run(), vm);
	po::notify(vm);

	// Boolean flags
	verbose = vm.count("verbose");

	if (vm.count("help")) {
		std::cerr << "mclp $Id: mclp.cpp 1229 2008-03-10 10:26:34Z nowozin $" << std::endl;
		std::cerr << "===================================================="
			<< "===========================" << std::endl;
		std::cerr << "Copyright (C) 2008 -- "
			<< "Sebastian Nowozin <*****@*****.**>"
			<< std::endl;
		std::cerr << std::endl;
		std::cerr << "Usage: mclp [options]" << std::endl;
		std::cerr << std::endl;
		std::cerr << "Train a multiclass LPBoost model for given and fixed multiclass "
			<< "weak learners." << std::endl;
		std::cerr << all_options << std::endl;

		exit(EXIT_SUCCESS);
	}

	// Check if output file already exists
	if (boost::filesystem::exists(boost::filesystem::path(output_filename))
		&& force == false) {
		std::cout << "Output file \"" << output_filename << "\" "
			<< "already exists, exiting." << std::endl;
		exit(EXIT_SUCCESS);
	}

	// Read in training data
	std::cout << "Training file: " << train_filename << std::endl;
	std::vector<int> labels;	// discrete class labels, >= 0, < K.
	std::vector<std::vector<std::string> > data_S_M;	// [n][m]
	int number_classes = read_problem(train_filename, labels, data_S_M);
	if (number_classes <= 0) {
		std::cerr << "Failed to read in training data." << std::endl;
		exit(EXIT_FAILURE);
	}
	std::cout << labels.size() << " samples, "
		<< number_classes << " classes." << std::endl;

	// Instantiate multiclass classifier and fill it with training data
	Boosting::LPBoostMulticlassClassifier mlp(number_classes, nu, weight_sharing);
	mlp.InitializeBoosting(labels, interior_point, solver);
	read_problem_data(mlp, data_S_M, number_classes);

	if (mpsfile.empty() == false)
		mlp.WriteMPS(mpsfile);

	// Solve
	std::cout << "Solving linear program..." << std::endl;
	mlp.Update();
	std::cout << "Done." << std::endl;
	std::cout << "Soft margin " << mlp.Rho() << ", objective "
		<< mlp.Gamma() << std::endl;

	// Print weights
	const std::vector<std::vector<double> >& clw = mlp.ClassifierWeights();
	std::cout << "Writing (K,M) weight matrix to \""
		<< output_filename << "\", K = "
		<< (weight_sharing ? 1 : number_classes)
		<< ", M = " << clw[0].size() << std::endl;

	std::ofstream wout(output_filename.c_str());
	if (wout.fail()) {
		std::cerr << "Failed to open \"" << output_filename
			<< "\" for writing." << std::endl;
		exit(EXIT_FAILURE);
	}
	wout << std::setprecision(12);
	for (unsigned int aidx = 0; aidx < clw.size(); ++aidx) {
		for (unsigned int bidx = 0; bidx < clw[aidx].size(); ++bidx) {
			wout << (bidx == 0 ? "" : " ") << clw[aidx][bidx];
		}
		wout << std::endl;
	}
	wout.close();

	exit(EXIT_SUCCESS);
}
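Going by the --train help text, a three-class training file with three weak learners would plausibly look like the following, one sample per row, a numeric label followed by one response file per weak learner (all filenames invented):

0 s0-m0.txt s0-m1.txt s0-m2.txt
1 s1-m0.txt s1-m1.txt s1-m2.txt
2 s2-m0.txt s2-m1.txt s2-m2.txt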
Example #6
int main()
{
	int dim[]={6, 2, 1};
	TMultiLayerPerceptron mlp(3, dim, 6);
	QVector<double> point, tmp;
	QList<QVector<double> > sample = sampleGen();
	double a1 = 0, b1 = 0, c1 = 0, r = 2, a11 = 0, b11 = 0, c11 = 0;
	for (int i = 0; i < 1000; i++) {
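		// sample alternates input vectors (even indices) and target vectors (odd indices)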
		for (int j = 0; j < sample.size(); j += 2) {
			tmp = mlp.recognize(sample.at(j));
			mlp.learn(sample.at(j+1));
		}
		qDebug() << "------------------------------------";
		qDebug() << "----------------" << i << "-------------------";
		point.clear();
		point << 0.1 << -0.1 << 0.3 << 0 << 0 << 0;
		qDebug() << point << endl;
		double a = mlp.recognize(point)[0] - chek(point, r)[0];
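		// NOTE: -a1 < -a is equivalent to a1 > a, so this test reduces to a1 != a;
		// it was probably meant to mirror the sign checks used for b and c below.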
		if ((a1 > a && -a1 < -a) || (a1 < a && -a1 > -a)) {
			qDebug() << a << " down";
			a11++;
		}
		else qDebug() << a << " up";
		a1 = a;

		point.clear();
		qDebug() << "------------------------------------";
		point << 0 << 1 << 1 << 1 << 0 << 0;
		qDebug() << point << endl;
		double b = mlp.recognize(point)[0] - chek(point, r)[0];

		if ((b1 > b && b > 0) || ((b1 < b && b < 0))) {
			qDebug() << b << " down";
			b11++;
		}
		else qDebug() << b << " up";
		b1 = b;

		point.clear();
		qDebug() << "------------------------------------";
		point << 0 << 0 << 0 << 0 << 0 << 0;
		qDebug() << point << endl;
		double c = mlp.recognize(point)[0] - chek(point, r)[0];
		if ((c1 > c && c > 0) || ((c1 < c && c < 0))) {
			qDebug() << c << " down";
			c11++;
		}
		else qDebug() << c << " up";
		c1 = c;
		point.clear();
	}
	qDebug() << "a" << a11;
	qDebug() << "b" << b11;
	qDebug() << "c" << c11;

	mlp.save("mlp.xml");
	//	TMultiLayerPerceptron mlp("mlp.xml");
	//	QVector<double> a;
	//	a << 12 << 6546 << 0;
	//	qDebug() << a << endl << mlp.recognize(a);
	return 0;
}
Example #7
	MultilayerPerceptron* MultilayerPerceptron::FromJSON(std::istream& stream)
	{
		Reader r(stream);

		SetReader(r);
		SetErrorResult(nullptr);
		TryGetToken(Token::BeginObject);

		// ensure we're parsing an MLP
		TryGetNameValuePair("Type", Token::String);
		VerifyEqual(r.readString(), "MultilayerPerceptron");

		TryGetNameValuePair("Layers", Token::BeginArray);

		// create our MLP
		auto_ptr<MLP> mlp(new MLP());

		// read each layer
		for(Token_t t = r.next(); t == Token::BeginObject; t = r.next())
		{
			TryGetNameValuePair("Inputs", Token::Number);
			uint64_t inputs = r.readUInt();
			TryGetNameValuePair("Outputs", Token::Number);
			uint64_t outputs = r.readUInt();
			TryGetNameValuePair("Function", Token::String);
			ActivationFunction_t function = ParseFunction(r.readString().c_str());
			
			// validate these parameters
			if(inputs > (uint64_t)std::numeric_limits<uint32_t>::max() ||
			  inputs == 0 ||
			  outputs > (uint64_t)std::numeric_limits<uint32_t>::max() ||
			  outputs == 0 ||
			  function == ActivationFunction::Invalid)
			{
				return nullptr;
			}

			auto_ptr<Layer> layer(new Layer(inputs, outputs, function));

			// set biases
			TryGetNameValuePair("Biases", Token::BeginArray);
			for(uint32_t j = 0; j < outputs; j++)
			{
				TryGetToken(Token::Number);
				layer->weights.biases()[j] = (float)r.readDouble();
			}
			TryGetToken(Token::EndArray);

			// set weights
			TryGetNameValuePair("Weights", Token::BeginArray);
			for(uint32_t j = 0; j < outputs; j++)
			{
				TryGetToken(Token::BeginArray);
				for(uint32_t i = 0; i < inputs; i++)
				{
					TryGetToken(Token::Number);
					layer->weights.feature(j)[i] = (float)r.readDouble();
				}
				TryGetToken(Token::EndArray);
			}
			TryGetToken(Token::EndArray);
			mlp->AddLayer(layer.release());
			TryGetToken(Token::EndObject);
		}
		TryGetToken(Token::EndObject);

		return mlp.release();
	}
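The parse sequence above fixes the JSON layout the loader accepts: a top-level object with a Type tag, then a Layers array whose entries each carry Inputs, Outputs, Function, a Biases array of length Outputs, and a Weights array of Outputs rows by Inputs columns. A hand-written illustration (the numbers and the "Sigmoid" name are made up; field order matters because TryGetNameValuePair reads sequentially):

{
  "Type": "MultilayerPerceptron",
  "Layers": [
    {
      "Inputs": 2,
      "Outputs": 3,
      "Function": "Sigmoid",
      "Biases": [0.1, -0.2, 0.05],
      "Weights": [
        [0.5, -0.3],
        [0.8, 0.1],
        [-0.6, 0.9]
      ]
    }
  ]
}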