// Exemplo n.º 1
// run from build folder of the project (see path below)
int main (int argc, char** argv) {
  
  // Load the training samples X and labels y from ASCII-serialized Eigen files.
  Eigen::MatrixXf X;
  Eigen::MatrixXf y;
  eigen_extensions::loadASCII("../test/svmtestx.eig.txt", &X);
  eigen_extensions::loadASCII("../test/svmtesty.eig.txt", &y);
  
  // Classify
  cout << X.topRows(5) << endl;
  cout << y.topRows(5) << endl;
  SVMClassifier svm;
  vector<int> yhat;
  svm.train(X, y);
  svm.test(X, yhat);
  
  // Recover the hyperplane (w, b) and compute the signed margin of every sample.
  Eigen::MatrixXf w;
  float b;
  svm.getw(w, b);
  Eigen::MatrixXf margin= ((X * w).array() + b).matrix(); // ahh eigen...
  
  // Evaluate accuracy and print results
  int match=0;
  for (size_t i = 0; i < yhat.size(); ++i) {
    const int truth = (int)y(i);
    if (yhat[i] == truth) {
      match++;
    } else {
      printf("WRONG! ");
    }
    printf("y= %d, yhat= %d margin= %f\n", truth, yhat[i], margin(i));
  }
  printf("MATLAB cross-check: last margin should be around -3.107\n");
  printf("Accuracy= %f. From MATLAB cross-check, expect this to be around 0.945\n", 1.0*match/yhat.size());
  
  // Save the model
  svm.saveModel("temp.svmmodel");
  
  // Test loading model in new instance of SVMClassifier
  SVMClassifier svm2;
  svm2.loadModel("temp.svmmodel");
  yhat.clear();
  svm2.test(X, yhat);
  // Re-score with the reloaded model; predictions should be unchanged.
  match = 0;
  for (size_t i = 0; i < yhat.size(); ++i) {
    if (yhat[i] == (int)y(i)) {
      match++;
    }
  }
  printf("Accuracy= %f from loaded model. Should be 0.945 again.\n", 1.0*match/yhat.size());
  
  
  printf("you may want to rm the temporary file temp.svmmodel\n");
}
// Exemplo n.º 2
// Finds the index that best splits `points` into two segments, each fitted by
// its own least-squares line. Prints both fitted lines and the combined
// residual error, and returns the winning split index (0 if no valid split).
int Shell::split(QVariantList points)
{
	QPointF point;

	//omit last value, because it is always zero
	int dataSize = points.size()-1;

	// Design matrix: column 0 is the all-ones intercept column, column 1 holds x.
	Eigen::MatrixXf x; x.resize(dataSize,2);
	Eigen::VectorXf y(dataSize);

	//create the linear eq system in the form of y = beta1*x + beta2*1
	for( int i = 0; i < dataSize; i++)
	{
		point = points[i].toPointF();
		x(i, 0) = 1.0f; //intercept column
		x(i, 1) = point.x(); //slope column (dependent on x)

		y(i) = point.y(); 
	}

	// Sum of squared residuals of the line a*x + b over one segment.
	// FIX: the coefficients were previously taken as int, silently truncating
	// the float regression results and distorting the error comparison; the
	// Regression was also copied by value on every call.
	auto error = [](const Regression& reg, float b, float a)->float
	{
		float result = 0;
		for(int i=0 ; i< reg.y.size(); i++)
		{
			float residual = reg.y(i) - (a*reg.x(i, 1) + b);
			result += residual*residual; // x*x instead of std::pow(x,2)
		}
		return result;
	};

	//Perform all pairs of regressions
	float lowestError = std::numeric_limits<float>::max();
	// FIX: initialize so the printouts below are well-defined even when the
	// loop never updates them (previously read uninitialized -> UB).
	float r1a = 0.0f, r1b = 0.0f;
	float r2a = 0.0f, r2b = 0.0f;
	int splitIndex = 0;

	// Each segment needs at least 2 points, otherwise x^T x is singular and
	// its inverse is NaN; hence i ranges over [2, dataSize-2].
	for( int i = 2; i <= dataSize-2; i++)
	{
		Regression reg1; reg1.x = x.topRows(i); reg1.y = y.head(i);
		Regression reg2; reg2.x = x.bottomRows(dataSize-i); reg2.y = y.tail(dataSize-i);

		// Normal equations: beta = (X^T X)^-1 X^T y; beta(0)=intercept, beta(1)=slope.
		Eigen::MatrixXf reg1Result = ((reg1.x.transpose() * reg1.x).inverse() * reg1.x.transpose()) * reg1.y;
		Eigen::MatrixXf reg2Result = ((reg2.x.transpose() * reg2.x).inverse() * reg2.x.transpose()) * reg2.y;

		float currentError = error(reg1,reg1Result(0),reg1Result(1)) + error(reg2,reg2Result(0),reg2Result(1));
		if (currentError < lowestError)
		{
			r1a = reg1Result(1); r1b = reg1Result(0);
			r2a = reg2Result(1); r2b = reg2Result(0);
			lowestError = currentError;
			splitIndex = i;
		}

	}
	std::cout << "r1:" << r1a << "x + " << r1b << std::endl;
	std::cout << "r2:" << r2a << "x + " << r2b << std::endl;
	std::cout << "(smallest error:" << lowestError << ")" << std::endl;

	return splitIndex;
}