Example #1
// recognition-stage code
void recognize()
{
	int i, nTestFaces  = 0;         // the number of test faces
	CvMat * trainPersonNumMat = 0;  // the person numbers from the training stage
	float * projectedTestFace = 0;

	// load the test images and return the number of test faces
	nTestFaces = loadFaceImgArray("test.txt");
	printf("%d test faces loaded\n", nTestFaces);

	// load the training results saved in the .xml file
	if( !loadTrainingData( &trainPersonNumMat ) ) return;

	// project the test images onto the PCA subspace
	projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );
	for(i=0; i<nTestFaces; i++)
	{
		int iNearest, nearest, truth;

		// project the test image onto the PCA subspace
		cvEigenDecomposite(
			faceImgArr[i],
			nEigens,
			eigenVectArr,
			0, 0,
			pAvgTrainImg,
			projectedTestFace);

		iNearest = findNearestNeighbor(projectedTestFace);
		truth    = personNumTruthMat->data.i[i];
		nearest  = trainPersonNumMat->data.i[iNearest];

		printf("nearest = %d, Truth = %d\n", nearest, truth);
	}
}
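The recognize() example above also calls a findNearestNeighbor() helper that is not part of this listing. Below is a minimal sketch of it, assuming the globals of the classic OpenCV eigenface sample (nTrainFaces, nEigens, and a projectedTrainFaceMat of size nTrainFaces x nEigens, CV_32FC1); it simply picks the training face with the smallest squared Euclidean distance in eigenspace and needs <float.h> for DBL_MAX.

// A sketch of findNearestNeighbor(), not taken from the projects above;
// the globals nTrainFaces, nEigens and projectedTrainFaceMat are assumptions.
int findNearestNeighbor(float * projectedTestFace)
{
	double leastDistSq = DBL_MAX;
	int i, iTrain, iNearest = 0;

	for(iTrain = 0; iTrain < nTrainFaces; iTrain++)
	{
		double distSq = 0;
		for(i = 0; i < nEigens; i++)
		{
			float d = projectedTestFace[i] -
			          projectedTrainFaceMat->data.fl[iTrain*nEigens + i];
			distSq += d*d;   // squared Euclidean distance in eigenspace
		}
		if(distSq < leastDistSq)
		{
			leastDistSq = distSq;
			iNearest = iTrain;
		}
	}
	// index into the training set; the caller maps it to a person number
	// via trainPersonNumMat->data.i[iNearest]
	return iNearest;
}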
Example #2
void recognize()
{
	int i, nTestFaces = 0;          // the number of test images
	CvMat * trainPersonNumMat = 0;  // the person numbers during training
	float * projectedTestFace = 0;

	// load test images and ground truth for person number
	nTestFaces = loadFaceImgArray("test.txt");
	printf("%d test faces loaded\n", nTestFaces);

	// load the saved training data
	if( !loadTrainingData( &trainPersonNumMat ) ) return;

	// project the test images onto the PCA subspace
	projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );
	for(i=0; i<nTestFaces; i++)
	{
		int iNearest, nearest, truth;

		// project the test image onto the PCA subspace
		cvEigenDecomposite(
			faceImgArr[i],
			nEigens,
			eigenVectArr,
			0, 0,
			pAvgTrainImg,
			projectedTestFace);

		iNearest = findNearestNeighbor(projectedTestFace);
		truth    = personNumTruthMat->data.i[i];
		nearest  = trainPersonNumMat->data.i[iNearest];

		printf("nearest = %d, Truth = %d\n", nearest, truth);
	}
}
Example #3
//////////////////////////////////
// recognize()
// returns the number of test images correctly recognized in this run
int faceRecognition::recognize()
{   
	int i, nTestFaces  = 0;         // the number of test images
	CvMat * trainPersonNumMat = 0;  // the person numbers during training
	float * projectedTestFace = 0;
	// load test images and ground truth for person number
	nTestFaces = loadFaceImgArray(testFileListPath);
	// load the saved training data
	if( !loadTrainingData( &trainPersonNumMat ) ) return 0;
	// project the test images onto the PCA subspace
	projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );
	// tally the results: count the images correctly matched in this run
	int count=0;
	for(i=0; i<nTestFaces; i++)
	{
		int iNearest, nearest, truth;
		// project the test image onto the PCA subspace
		cvEigenDecomposite(
			faceImgArr[i],
			nEigens,
			eigenVectArr,
			0, 0,
			pAvgTrainImg,
			projectedTestFace);
		iNearest = findNearestNeighbor(projectedTestFace);
		truth    = personNumTruthMat->data.i[i];
		nearest  = trainPersonNumMat->data.i[iNearest];
	//	AfxMessageBox("nearest = %d, Truth = %d", nearest, truth);
		if(nearest==truth)
			count++;
	} // for
	return count;
}
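Each of the recognize() examples above guards on loadTrainingData(&trainPersonNumMat) before projecting anything. A minimal sketch of such a loader is shown below, assuming the training stage wrote its results with the legacy CvFileStorage API; the file name facedata.xml and the node names are assumptions, not taken from these projects.

// A sketch of loadTrainingData(); file and node names are assumptions.
// Returns 1 on success, 0 on failure.
int loadTrainingData(CvMat ** pTrainPersonNumMat)
{
	CvFileStorage * fileStorage =
		cvOpenFileStorage("facedata.xml", 0, CV_STORAGE_READ);
	if( !fileStorage )
	{
		printf("Can't open training database file 'facedata.xml'.\n");
		return 0;
	}

	nEigens     = cvReadIntByName(fileStorage, 0, "nEigens", 0);
	nTrainFaces = cvReadIntByName(fileStorage, 0, "nTrainFaces", 0);
	*pTrainPersonNumMat   = (CvMat *)cvReadByName(fileStorage, 0, "trainPersonNumMat", 0);
	projectedTrainFaceMat = (CvMat *)cvReadByName(fileStorage, 0, "projectedTrainFaceMat", 0);
	pAvgTrainImg          = (IplImage *)cvReadByName(fileStorage, 0, "avgTrainImg", 0);

	eigenVectArr = (IplImage **)cvAlloc(nEigens * sizeof(IplImage *));
	for(int i = 0; i < nEigens; i++)
	{
		char varname[64];
		sprintf(varname, "eigenVect_%d", i);
		eigenVectArr[i] = (IplImage *)cvReadByName(fileStorage, 0, varname, 0);
	}

	cvReleaseFileStorage(&fileStorage);
	return 1;
}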
Example #4
bool z3ann::loadTrainingDataSet(const std::vector<trainingData> &data)
    {
        std::vector<trainingData>::const_iterator it = data.begin();
        std::vector<trainingData>::const_iterator end = data.end();

        // _equations.resize(data.size());
        for (; it != end; ++it)
            if (!loadTrainingData(*it))
                return false;
        return true;
    }
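For comparison, the iterator loop in Example #4 can be written as a C++11 range-based for. This is only a sketch of an equivalent form, assuming the same trainingData type and the single-sample loadTrainingData(const trainingData &) overload used above:

bool z3ann::loadTrainingDataSet(const std::vector<trainingData> &data)
{
    for (const trainingData &sample : data)
        if (!loadTrainingData(sample))   // stop on the first sample that fails to load
            return false;
    return true;
}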
Example #5
File: RAE.cpp Project: zerkh/RAE
void RAE::training()
{
	x = lbfgs_malloc(getRAEWeightSize());
	Map<MatrixLBFGS>(x, getRAEWeightSize(), 1).setRandom();
	lbfgs_parameter_t param;
	iterTimes = atoi(para->getPara("IterationTime").c_str());

	loadTrainingData();
	lbfgs_parameter_init(&param);
	param.max_iterations = iterTimes;

	lbfgsfloatval_t fx = 0;
	int ret;

	ret = lbfgs(getRAEWeightSize(), x, &fx, RAELBFGS::evaluate, RAELBFGS::progress, this, &param);

	cout << "L-BFGS optimization terminated with status code = " << ret << endl;
	cout << " fx = " << fx << endl;

	updateWeights(x);
	logWeights(para);
	trainingData.clear();
	lbfgs_free(x);
}
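RAE::training() above follows the standard libLBFGS call pattern: allocate the parameter vector with lbfgs_malloc(), initialise lbfgs_parameter_t, call lbfgs() with evaluate/progress callbacks, then free the vector. The RAE-specific callbacks are not shown in this listing, so the self-contained sketch below exercises the same pattern on a toy quadratic objective instead of the RAE cost.

#include <lbfgs.h>
#include <iostream>

// Objective f(x) = sum_i (x_i - 1)^2 and its gradient, written in the
// callback shape lbfgs() expects (lbfgs_evaluate_t).
static lbfgsfloatval_t evaluate(void *instance, const lbfgsfloatval_t *x,
                                lbfgsfloatval_t *g, const int n,
                                const lbfgsfloatval_t step)
{
	lbfgsfloatval_t fx = 0;
	for (int i = 0; i < n; ++i)
	{
		g[i] = 2.0 * (x[i] - 1.0);            // d/dx_i (x_i - 1)^2
		fx  += (x[i] - 1.0) * (x[i] - 1.0);
	}
	return fx;
}

int main()
{
	const int n = 16;
	lbfgsfloatval_t fx = 0;
	lbfgsfloatval_t *x = lbfgs_malloc(n);
	for (int i = 0; i < n; ++i)
		x[i] = 0.0;

	lbfgs_parameter_t param;
	lbfgs_parameter_init(&param);
	param.max_iterations = 100;

	// Same call shape as in RAE::training(); progress callback omitted (NULL).
	int ret = lbfgs(n, x, &fx, evaluate, NULL, NULL, &param);
	std::cout << "L-BFGS optimization terminated with status code = " << ret
	          << ", fx = " << fx << std::endl;

	lbfgs_free(x);
	return 0;
}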
Example #6
// Recognize the face in each of the test images given, and compare the results with the truth.
void recognizeFileList(char *szFileTest)
{
    int i, nTestFaces  = 0;         // the number of test images
    CvMat * trainPersonNumMat = 0;  // the person numbers during training
    float * projectedTestFace = 0;
    const char *answer;
    int nCorrect = 0;
    int nWrong = 0;
    double timeFaceRecognizeStart;
    double tallyFaceRecognizeTime;
    float confidence;
    
    // load test images and ground truth for person number
    nTestFaces = loadFaceImgArray(szFileTest);
    printf("%d test faces loaded\n", nTestFaces);
    
    // load the saved training data
    if( !loadTrainingData( &trainPersonNumMat ) ) return;
    
    // project the test images onto the PCA subspace
    projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );
    timeFaceRecognizeStart = (double)cvGetTickCount();	// Record the timing.
    for(i=0; i<nTestFaces; i++)
    {
        int iNearest, nearest, truth;
        
        // project the test image onto the PCA subspace
        cvEigenDecomposite(
                           faceImgArr[i],
                           nEigens,
                           eigenVectArr,
                           0, 0,
                           pAvgTrainImg,
                           projectedTestFace);
        
        iNearest = findNearestNeighbor(projectedTestFace, &confidence);
        truth    = personNumTruthMat->data.i[i];
        if(iNearest!=-1)
        {
            nearest  = trainPersonNumMat->data.i[iNearest];
        }
        else
        {
            nearest = 0;
        }
        
        if (nearest == truth) {
            answer = "Correct";
            nCorrect++;
        }
        else {
            answer = "WRONG!";
            nWrong++;
        }
        printf("nearest = %d, Truth = %d (%s). Confidence = %f\n", nearest, truth, answer, confidence);
    }
    tallyFaceRecognizeTime = (double)cvGetTickCount() - timeFaceRecognizeStart;
    if (nCorrect+nWrong > 0) {
        printf("TOTAL ACCURACY: %d%% out of %d tests.\n", nCorrect * 100/(nCorrect+nWrong), (nCorrect+nWrong));
        printf("TOTAL TIME: %.1fms average.\n", tallyFaceRecognizeTime/((double)cvGetTickFrequency() * 1000.0 * (nCorrect+nWrong) ) );
    }
    
}
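All of the eigenface examples start from loadFaceImgArray(), which is also not part of this listing. A minimal sketch follows, assuming the list file format of the classic eigenface sample (one "<person number> <image path>" pair per line) and the globals faceImgArr / personNumTruthMat used above.

// A sketch of loadFaceImgArray(); the list file format is an assumption.
// Fills the globals faceImgArr and personNumTruthMat and returns the
// number of faces loaded.
int loadFaceImgArray(char * filename)
{
    FILE * imgListFile = fopen(filename, "r");
    char imgFilename[512];
    int iFace, nFaces = 0;

    if( !imgListFile )
    {
        printf("Can't open file %s\n", filename);
        return 0;
    }

    // count the list entries, then rewind to read them
    while( fgets(imgFilename, sizeof(imgFilename), imgListFile) )
        ++nFaces;
    rewind(imgListFile);

    faceImgArr        = (IplImage **)cvAlloc( nFaces*sizeof(IplImage *) );
    personNumTruthMat = cvCreateMat( 1, nFaces, CV_32SC1 );

    for(iFace = 0; iFace < nFaces; iFace++)
    {
        // read the person number and the image file name
        fscanf(imgListFile, "%d %s",
               personNumTruthMat->data.i + iFace, imgFilename);

        // load the face image as 8-bit greyscale
        faceImgArr[iFace] = cvLoadImage(imgFilename, CV_LOAD_IMAGE_GRAYSCALE);
        if( !faceImgArr[iFace] )
            printf("Can't load image from %s\n", imgFilename);
    }

    fclose(imgListFile);
    return nFaces;
}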
Example #7
void TrainingDataRenderer::init()
{
  loadTrainingData("assets/mocap_data/positions_only_mocap_data.txt", false);
  loadTrainingData("assets/mocap_data/positions_only_mocap_labels.txt", true);
  //For now just hard code some stuff in here to see if everything else works!
  _shader = new Shader();
  _shader->registerShader("shaders/vertColoredVerts.glsl", ShaderType::VERTEX);
  _shader->registerShader("shaders/fragColoredVerts.glsl", ShaderType::FRAGMENT);
  _shader->compileShaders();
  _shader->linkShaders();

  std::vector<Color> colors = {
    Color::white(),
    Color::black(),
    Color::red(),
    Color::blue(),
    Color::green()
  };
  
  Vector3 normal(1.0, 0.0,0.0);
  Vector2 uv(0.0,0.0);
  Vector4 tangent(1.0, 0.0, 0.0, 0.0);
  
  _mesh = new Mesh();
  for(std::vector<vec3> row : _positions)
  {
    int color_idx= 0;
    for(vec3 v : row)
    {
      Color c = colors[color_idx % colors.size()];   // wrap so rows longer than the palette stay in bounds
      Vector3 c3(c.r,c.g,c.b);
      Vector4 c4(c.r,c.g,c.b, c.a);
      Vector3 p(v.x ,v.y,v.z);
      _mesh->addPosition(p);
      
      //FIXME:  THE COLOR CHANNEL IS OFF/INCORRECT.  HAD TO STUFF COLORS INTO THE NORMAL
      //CHANNEL.  THE TANGENT OR UV IS WRONG.
      _mesh->addColor(c);
      _mesh->addNormal(c3);
      _mesh->addUV(uv);
      _mesh->addTangent(c4);
      color_idx++;
    }
  }
  _mesh->constructBuffer();
  _mesh->setPrimitiveTypeToPoints();
  _mesh->bindAttributesToVAO(*_shader);
  //_mesh->bindAttributesToVAO();
  
  
  _labelsMesh = new Mesh();
  for(vec3 v : _labels)
  {
    Color c(1.0, 1.0,0.0,1.0);
    Vector3 n(c.r ,c.g,c.b);
    Vector4 t(c.r ,c.g,c.b, c.a);
    Vector3 p(v.x ,v.y,v.z);
    _labelsMesh->addPosition(p);
      
      //FIXME:  THE COLOR CHANNEL IS OFF/INCORRECT.  HAD TO STUFF COLORS INTO THE NORMAL
      //CHANNEL.  THE TANGENT OR UV IS WRONG.
    _labelsMesh->addColor(c);
    _labelsMesh->addNormal(n);
    _labelsMesh->addTangent(t);
  }
  _labelsMesh->constructBuffer();
  _labelsMesh->setPrimitiveTypeToPoints();
  _labelsMesh->bindAttributesToVAO(*_shader);

  _mvp.identity();

  vec3 min, max;
  _mesh->calculateBoundingBox(min, max);
  printf("\n");
  printf("Min %3.4f,%3.4f,%3.4f\n", min.x, min.y, min.z);
  printf("Max %3.4f,%3.4f,%3.4f\n", max.x, max.y, max.z);
  
  _fpsController = std::make_shared<FPSController>();
  InputManager::addListener(_fpsController);
  _fpsController->init();
}
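TrainingDataRenderer::loadTrainingData() itself is not included here. The sketch below is only a guess at what such a loader could look like, assuming each line of the mocap text file holds whitespace-separated x y z triples; the file format, the stand-in vec3 struct, and the container parameters are all assumptions, not taken from the project.

#include <fstream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical stand-in for the project's vec3.
struct vec3 { float x, y, z; };

// Parses one frame of positions per line (or one label triple per line
// when isLabels is true) into the supplied containers.
void loadTrainingDataSketch(const std::string &path, bool isLabels,
                            std::vector<std::vector<vec3>> &positions,
                            std::vector<vec3> &labels)
{
  std::ifstream in(path);
  std::string line;
  while (std::getline(in, line))
  {
    std::istringstream fields(line);
    std::vector<vec3> row;
    vec3 v;
    while (fields >> v.x >> v.y >> v.z)
      row.push_back(v);                  // collect x y z triples
    if (row.empty())
      continue;                          // skip blank or malformed lines
    if (isLabels)
      labels.push_back(row.front());     // one label per line
    else
      positions.push_back(row);          // one frame of joint positions
  }
}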