Code example #1
int main(int argc, char** argv)
{
	
	float a[]={	0.234400724097, 0.445210153051, 0.420883079381, 0.0584111370634, 0.930917795284, 0.463946380108, 0.827477442854, 0.195052690912,
		0.224843236267, 0.011674592046, 0.778465234345, 0.795119566607, 0.834330061452, 0.250878254601, 0.907848368295, 0.159768191396,
		0.359447753375, 0.694377176768, 0.323279688498, 0.590454463022, 0.32053508251, 0.25926247011, 0.473382632749, 0.680857359827,
		0.871843303433, 0.347550207092, 0.807721675262, 0.51342440135, 0.633862634367, 0.588847708996, 0.604920986251, 0.9485023141,
		0.511286105241, 0.780677021392, 0.346168472115, 0.408572254219, 0.977881372787, 0.994457177414, 0.553713182589, 0.181657338197,
		0.188679332574, 0.138351555791, 0.549762090688, 0.763422732648, 0.270469815182, 0.368094710756, 0.28652717945, 0.344130955251,
		0.808703681865, 0.48242375244, 0.0961390490465, 0.585178232015, 0.0947071702324, 0.00663925147531, 0.409282147388, 0.865532591897,
		0.233760414088, 0.399258033215, 0.547551739688, 0.078241816204, 0.672857401346, 0.083814529556, 0.68575517509, 0.213487218459	};
		
	vector<int> labels = {1,1,1,1,1,2,1,2};
	
	Mat data(Size(8,8), CV_32F, a);
	
	//cout << data << endl;
	
	PCA pca;
	LDA lda;
	
	pca(data, Mat(), CV_PCA_DATA_AS_ROW, 7);
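	// note: 8 samples of dimension 8, so after mean subtraction at most 7 principal
	// components carry information, hence maxComponents = 7 here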
		
	//cout << pca.project(data) << endl;
	
	lda.compute(pca.project(data), labels);
	
	cout << lda.project(pca.project(data)) << endl;
	//cout << lda.reconstruct(lda.project(pca.project(data))) << endl;
	
	return 0;
}
Code example #2
//Rate a location on how likely it is to be a bubble
double rateBubble(Mat& det_img_gray, Point bubble_location, PCA& my_PCA){
	Mat query_pixels, pca_components;
	getRectSubPix(det_img_gray, Point(14,18), bubble_location, query_pixels);
	query_pixels.reshape(0,1).convertTo(query_pixels, CV_32F);
	pca_components = my_PCA.project(query_pixels);
	//The rating is the SSD of query pixels and their back projection
	Mat out = my_PCA.backProject(pca_components)- query_pixels;
	return sum(out.mul(out)).val[0];
}
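A hedged usage sketch (the wrapper and threshold below are illustrative and not part of the original code): my_PCA is assumed to have been trained on flattened 14x18 bubble patches, e.g. by train_PCA_classifier in code example #8.

//Hypothetical wrapper: true when the patch at pt is well explained by the bubble subspace.
bool looksLikeBubble(Mat& det_img_gray, Point pt, PCA& my_PCA, double ssd_threshold){
	return rateBubble(det_img_gray, pt, my_PCA) < ssd_threshold;
}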
Code example #3
void columbiaTest(int testId = 0)
{
	CascadeClassifier classifier;
	classifier.load("haarcascades/haarcascade_frontalface_alt_tree.xml");

	ShapePredictor predictor;
	predictor.load("model/helen.txt");

	PCA pca;
	FileStorage fs("model/pca.xml", FileStorage::READ);
	fs["mean"] >> pca.mean;
	fs["eigenvals"] >> pca.eigenvalues;
	fs["eigenvecs"] >> pca.eigenvectors;
	fs.release();

	/*LDA lda;
	lda.load("model/lda.xml");*/

	SVM svm;
	svm.load("model/svm.xml");

	cout << "\nmodel loaded" << endl;

	// test prediction
	cout << "\nbegin test" << endl;
	int corr = 0, total = 0;

	Mat_<float> labels, multihog, ldmks;
	collectData(testId, classifier, predictor,
				labels, multihog, ldmks);

	for (int i = 0; i < multihog.rows; i++) {
		Mat_<float> pcaVec = pca.project(multihog.row(i));
		Mat_<float> datVec(1, pcaVec.cols + ldmks.cols);
		for (int j = 0; j < pcaVec.cols; j++)
			datVec(0, j) = pcaVec(0, j);
		for (int j = 0; j < ldmks.cols; j++)
			datVec(0, j + pcaVec.cols) = ldmks(i, j);
		//Mat_<float> ldaVec = lda.project(datVec);

		float pred = svm.predict(datVec);
		if ((int)pred == (int)labels(i, 0))
			corr++;

		total++;
	}
	cout << "testId = " << testId << endl;
	cout << "corr = " << corr << " , total = " << total << endl;
	cout << "percentage: " << (double)corr / total << endl;

	ofstream fout("data/testId" + to_string(testId) + ".txt");
	fout << "corr = " << corr << " , total = " << total << endl;
	fout << "percentage: " << (double)corr / total << endl;
	fout.close();
}
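A hedged companion sketch (not part of the original project): how a file with the same "mean" / "eigenvals" / "eigenvecs" entries as model/pca.xml could be written after training.

#include <opencv2/core/core.hpp>
#include <string>
using namespace cv;

void savePca(const PCA& pca, const std::string& path)
{
	FileStorage fs(path, FileStorage::WRITE);
	fs << "mean" << pca.mean;             // same keys that columbiaTest() reads back
	fs << "eigenvals" << pca.eigenvalues;
	fs << "eigenvecs" << pca.eigenvectors;
	fs.release();
}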
Code example #4
void test_pca9685(PCA & pca)
{
	//Rise
	float val = 0.0f;
	for(; val < 1.0; val += 0.001f)
	{		
		for(unsigned i = 0; i < 15; ++i)
			pca.set_channel_pulse_width(i, val);
	}
	
	//Fall
	for(; val > 0.0; val -= 0.001f)
		for(unsigned i = 0; i < 15; ++i)
			pca.set_channel_pulse_width(i, val);
}
Code example #5
File: main.cpp  Project: melias122/scratch
int
predict(PCA &pca, Mat &train, Mat face, double threshold = 0.3, double distance = 44000){
    Mat w = pca.project(face);
    Mat predicted = pca.backProject(w);

    int label = -1;
    double min = distance * threshold;

    for(int i = 0; i < 15; i++){
        for(int j = 0; j < 8; j++){
            double d = norm(predicted, train.row((i*8) + j));
            if(d < min){
                min = d;
                label = i;
            }
        }
    }

    return label;
}
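A hedged driver sketch for predict() (placeholder random data; assumed to be compiled in the same translation unit as the function above; the face size, subject/image counts and component count are illustrative only):

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main(){
    Mat train(15 * 8, 10304, CV_32F);               // 15 subjects x 8 images, one flattened 112x92 face per row
    randu(train, Scalar::all(0), Scalar::all(255)); // placeholder pixel data
    PCA pca(train, Mat(), CV_PCA_DATA_AS_ROW, 40);  // keep 40 eigenfaces
    Mat face = train.row(3).clone();                // query face
    int label = predict(pca, train, face);          // -1 means no match under the threshold
    cout << "predicted label: " << label << endl;
    return 0;
}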
Code example #6
void compressFeature( string filename, std::vector< std::vector<float> > &models, const int dim, bool ascii ){
  PCA pca;
  pca.read( filename.c_str(), ascii );
  VectorXf variance = pca.getVariance();
  MatrixXf tmpMat = pca.getAxis();
  MatrixXf tmpMat2 = tmpMat.block(0,0,tmpMat.rows(),dim);
  const int num = (int)models.size();
  for( int i=0; i<num; i++ ){
    Map<VectorXf> vec( &(models[i][0]), models[i].size() );
    //vec = tmpMat2.transpose() * vec;
    VectorXf tmpvec = tmpMat2.transpose() * vec;
    models[i].resize( dim );
    if( WHITENING ){
      for( int t=0; t<dim; t++ )
	models[i][t] = tmpvec[t] / sqrt( variance( t ) );
    }
    else{
      for( int t=0; t<dim; t++ )
	models[i][t] = tmpvec[t];
    }
  }
}
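An equivalent Eigen-only sketch for a single feature vector (compressOne is illustrative and not part of the original API); it mirrors the projection and optional whitening done in the loop above.

#include <Eigen/Core>
using namespace Eigen;

VectorXf compressOne( const MatrixXf &axes, const VectorXf &variance,
                      const VectorXf &feature, const int dim, const bool whitening ){
  // project onto the first `dim` principal axes (tmpMat2.transpose() * vec above)
  VectorXf out = axes.leftCols( dim ).transpose() * feature;
  if( whitening )
    out = out.cwiseQuotient( variance.head( dim ).cwiseSqrt() ); // divide by per-axis std dev
  return out;
}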
Code example #7
void PCANormalEstimator::estimateNormals(vtkPolyData* data, double neighRadius)
{
	vtkPointLocator* locator = vtkPointLocator::New();
	  locator->SetDataSet(data);
	  locator->BuildLocator();

	int i, numOfPoints = data->GetNumberOfPoints();
	vtkDoubleArray* normals = vtkDoubleArray::New();
	  normals->SetNumberOfComponents(3);
	  normals->SetNumberOfTuples(numOfPoints);

	PCA pca;
	double p[3], com[3], eigenvals[3], eigenvecs[3][3];
	vtkPoints* points = data->GetPoints();
	vtkIdList* neighs = vtkIdList::New();

	for ( i = 0 ; i < numOfPoints ; ++i )
	{
		points->GetPoint(i, p);
		neighs->Reset();
		locator->FindPointsWithinRadius(neighRadius, p, neighs);
		if ( neighs->GetNumberOfIds() < 3 )
		{
			normals->SetTuple3(i, 0.0, 0.0, 0.0);
			continue;
		}
		// Perform PCA
		pca.doPCA(points, neighs, eigenvecs, eigenvals, com);
		// Save the normal
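		// (assumption: doPCA returns eigenvectors ordered by decreasing eigenvalue,
		//  so column 2 of eigenvecs is the direction of least variance, i.e. the surface normal)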
		normals->SetTuple3(i, eigenvecs[0][2], eigenvecs[1][2], eigenvecs[2][2]);
	}

	data->GetPointData()->SetNormals(normals);

	// Clean up
	locator->Delete();
	normals->Delete();
	neighs->Delete();
}
Code example #8
//We should probably encapsulate the PCA stuff...
void train_PCA_classifier(Mat& train_img_gray, PCA& my_PCA, Mat& comparison_vectors){
	//Goes though all the selected bubble locations and puts their pixels into rows of
	//a giant matrix called so we can perform PCA on them (we need at least 3 locations to do this)
	Mat PCA_set = Mat::zeros(bubble_locations.size(), 18*14, CV_32F);
	for(size_t i = 0; i < bubble_locations.size(); i+=1) {
		Mat PCA_set_row;
		getRectSubPix(train_img_gray, Point(14,18), bubble_locations[i], PCA_set_row);
		PCA_set_row.convertTo(PCA_set_row, CV_32F);
		PCA_set.row(i) += PCA_set_row.reshape(0,1);
	}

	my_PCA = PCA(PCA_set, Mat(), CV_PCA_DATA_AS_ROW, 5);
	comparison_vectors = my_PCA.project(PCA_set);
}
Code example #9
File: MTT.cpp  Project: gnishida/Morph
void MTT::doPCA(RoadGraph* roads, PCA& pca) {
	// build a matrix from the vertex data
	cv::Mat vmat = cv::Mat(GraphUtil::getNumVertices(roads), 2, CV_64FC1);

	int count = 0;
	RoadVertexIter vi, vend;
	for (boost::tie(vi, vend) = boost::vertices(roads->graph); vi != vend; ++vi) {
		if (!roads->graph[*vi]->valid) continue;

		vmat.at<double>(count, 0) = roads->graph[*vi]->getPt().x();
		vmat.at<double>(count, 1) = roads->graph[*vi]->getPt().y();

		count++;
	}

	// run PCA
	pca.pca(vmat, false);
}
Code example #10
File: pcltools.cpp  Project: ipa-nhg/kukadu
	// according to http://www.pcl-users.org/Finding-oriented-bounding-box-of-a-cloud-td4024616.html
	FitCube PCLTools::fitBox(PointCloud<PointXYZ>::Ptr cloud) {

		FitCube retCube;
		PCA<PointXYZ> pca;
		PointCloud<PointXYZ> proj;

		pca.setInputCloud(cloud);
		pca.project(*cloud, proj);

		PointXYZ proj_min;
		PointXYZ proj_max;
		getMinMax3D(proj, proj_min, proj_max);

		PointXYZ min;
		PointXYZ max;
		pca.reconstruct(proj_min, min);
		pca.reconstruct(proj_max, max);

		// Rotation of PCA
		Eigen::Matrix3f rot_mat = pca.getEigenVectors();

		// Translation of PCA
		Eigen::Vector3f cl_translation = pca.getMean().head(3);

		Eigen::Matrix3f affine_trans;

		// Reordering of principal components
		affine_trans.col(2) << (rot_mat.col(0).cross(rot_mat.col(1))).normalized();
		affine_trans.col(0) << rot_mat.col(0);
		affine_trans.col(1) << rot_mat.col(1);

		retCube.rotation = Eigen::Quaternionf(affine_trans);
		Eigen::Vector4f t = pca.getMean();

		retCube.translation = Eigen::Vector3f(t.x(), t.y(), t.z());

		retCube.width = fabs(proj_max.x - proj_min.x);
		retCube.height = fabs(proj_max.y - proj_min.y);
		retCube.depth = fabs(proj_max.z - proj_min.z);

		return retCube;

	}
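A hedged driver sketch (it assumes the kukadu headers declaring FitCube and a static PCLTools::fitBox as defined above are available; the point data is purely illustrative):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main() {
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
	// a flat, slightly tilted patch of points; PCA needs a non-degenerate cloud
	for (float x = 0.0f; x < 1.0f; x += 0.05f)
		for (float y = 0.0f; y < 0.5f; y += 0.05f)
			cloud->push_back(pcl::PointXYZ(x, y, 0.1f * x));
	FitCube box = PCLTools::fitBox(cloud);
	// box.width / box.height / box.depth hold the oriented bounding-box extents,
	// box.rotation and box.translation its pose
	return 0;
}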
Code example #11
vector<float> ReconnaissanceHandler::recognisePCA(vector<float>& caracteristicVector, PCA pca, vector<vector<float>>& classes)
{
	Mat v(caracteristicVector.size(),1, CV_32F);
	int i = 0;
	for (float f : caracteristicVector) {
		v.at<float>(i, 0) = f;
		++i;
	}
	Mat reduceVector = pca.project(v);
	
	vector<float> caractReduced;
	for (i = 0; i < reduceVector.rows; ++i) {
		caractReduced.push_back(reduceVector.at<float>(i, 0));
	}

	vector<float> dist;
	for (int i = 0; i < classes.size(); ++i)
	{
		dist.push_back(distanceVector(caractReduced, classes.at(i)));
	}
	return dist;
}
Code example #12
//Compare the bubbles with all the bubbles used in the classifier.
bubble_val checkBubble(Mat& det_img_gray, Point bubble_location, PCA& my_PCA, Mat& comparison_vectors, Point search_window=Point(5,5)){
	Mat query_pixels;

	//This bit of code finds the location in the search_window most likely to be a bubble 
	//then it checks that rather than the exact specified location.
	Mat out = Mat::zeros(Size(search_window.y, search_window.x) , CV_32FC1);
	Point offset = Point(bubble_location.x - search_window.x/2, bubble_location.y - search_window.y/2);
	for(size_t i = 0; i < search_window.y; i+=1) {
		for(size_t j = 0; j < search_window.x; j+=1) {
			out.row(i).col(j) += rateBubble(det_img_gray, Point(j,i) + offset, my_PCA);
		}
	}
	Point min_location;
	minMaxLoc(out, NULL,NULL, &min_location);

	getRectSubPix(det_img_gray, Point(14,18), min_location + offset, query_pixels);

	query_pixels.reshape(0,1).convertTo(query_pixels, CV_32F);
	Point max_location;
	matchTemplate(comparison_vectors, my_PCA.project(query_pixels), out, CV_TM_CCOEFF_NORMED);
	minMaxLoc(out, NULL,NULL,NULL, &max_location);
	return bubble_values[max_location.y];
}
Code example #13
//********************************
//* main
int main(int argc, char* argv[]) {
  if( (argc != 13) && (argc != 15) ){
    std::cerr << "usage: " << argv[0] << " [path] <rank_num> <exist_voxel_num_threshold> [model_pca_filename] <dim_model> <size1> <size2> <size3> <detect_th> <distance_th> <model_num> /input:=/camera/rgb/points" << std::endl;
    exit( EXIT_FAILURE );
  }
  char tmpname[ 1000 ];
  ros::init (argc, argv, "detect_object_vosch_multi", ros::init_options::AnonymousName);

  // read the length of voxel side
  sprintf( tmpname, "%s/param/parameters.txt", argv[1] );
  voxel_size = Param::readVoxelSize( tmpname );

  detect_th = atof( argv[9] );
  distance_th = atof( argv[10] );
  model_num = atoi( argv[11] );
  rank_num = atoi( argv[2] );

  // set marker color
  const int marker_model_num = 6;
  if( model_num > marker_model_num ){
    std::cerr << "Please set more marker colors for detection of more than " << marker_model_num << " objects." << std::endl;
    exit( EXIT_FAILURE );
  }
  marker_color_r = new float[ marker_model_num ];
  marker_color_g = new float[ marker_model_num ];
  marker_color_b = new float[ marker_model_num ];
  marker_color_r[ 0 ] = 1.0; marker_color_g[ 0 ] = 0.0; marker_color_b[ 0 ] = 0.0;  // red
  marker_color_r[ 1 ] = 0.0; marker_color_g[ 1 ] = 1.0; marker_color_b[ 1 ] = 0.0;  // green
  marker_color_r[ 2 ] = 0.0; marker_color_g[ 2 ] = 0.0; marker_color_b[ 2 ] = 1.0;  // blue
  marker_color_r[ 3 ] = 1.0; marker_color_g[ 3 ] = 1.0; marker_color_b[ 3 ] = 0.0;  // yellow
  marker_color_r[ 4 ] = 0.0; marker_color_g[ 4 ] = 1.0; marker_color_b[ 4 ] = 1.0;  // cyan
  marker_color_r[ 5 ] = 1.0; marker_color_g[ 5 ] = 0.0; marker_color_b[ 5 ] = 1.0;  // magenta
  // marker_color_r[ 0 ] = 0.0; marker_color_g[ 0 ] = 1.0; marker_color_b[ 0 ] = 0.0; // green
  // marker_color_r[ 1 ] = 0.0; marker_color_g[ 1 ] = 0.0; marker_color_b[ 1 ] = 1.0; // blue
  // marker_color_r[ 2 ] = 0.0; marker_color_g[ 2 ] = 1.0; marker_color_b[ 2 ] = 1.0; // cyan
  // marker_color_r[ 3 ] = 1.0; marker_color_g[ 3 ] = 0.0; marker_color_b[ 3 ] = 0.0; // pink

  // read the number of voxels in each subdivision's side of scene
  box_size = Param::readBoxSizeScene( tmpname );

  // read the dimension of compressed feature vectors
  dim = Param::readDim( tmpname );
  const int dim_model = atoi(argv[5]);
  if( dim <= dim_model ){
    std::cerr << "ERR: dim_model should be less than dim(in dim.txt)" << std::endl;
    exit( EXIT_FAILURE );
  }

  // read the threshold for RGB binalize
  sprintf( tmpname, "%s/param/color_threshold.txt", argv[1] );
  Param::readColorThreshold( color_threshold_r, color_threshold_g, color_threshold_b, tmpname );

  // determine the size of sliding box
  region_size = box_size * voxel_size;
  float tmp_val = atof(argv[6]) / region_size;
  int size1 = (int)tmp_val;
  if( ( ( tmp_val - size1 ) >= 0.5 ) || ( size1 == 0 ) ) size1++;
  tmp_val = atof(argv[7]) / region_size;
  int size2 = (int)tmp_val;
  if( ( ( tmp_val - size2 ) >= 0.5 ) || ( size2 == 0 ) ) size2++;
  tmp_val = atof(argv[8]) / region_size;
  int size3 = (int)tmp_val;
  if( ( ( tmp_val - size3 ) >= 0.5 ) || ( size3 == 0 ) ) size3++;
  sliding_box_size_x = size1 * region_size;
  sliding_box_size_y = size2 * region_size;
  sliding_box_size_z = size3 * region_size;
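  // note: each of the three blocks above rounds <size>/region_size to the nearest
  // integer but forces a minimum of 1, so the sliding box spans at least one
  // subdivision along every axis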

  // set variables
  search_obj.setModelNum( model_num );
#ifdef CCHLAC_TEST
  sprintf( tmpname, "%s/param/max_c.txt", argv[1] );
#else
  sprintf( tmpname, "%s/param/max_r.txt", argv[1] );
#endif
  search_obj.setNormalizeVal( tmpname );
  search_obj.setRange( size1, size2, size3 );
  search_obj.setRank( rank_num * model_num ); // for removeOverlap()
  search_obj.setThreshold( atoi(argv[3]) );

  // read projection axes of the target objects' subspace
  FILE *fp = fopen( argv[4], "r" );
  char **model_file_names = new char* [ model_num ];
  char line[ 1000 ];
  for( int i=0; i<model_num; i++ ){
    model_file_names[ i ] = new char [ 1000 ];
    if( fgets( line, sizeof(line), fp ) == NULL ) std::cerr<<"fgets err"<<std::endl;
    line[ strlen( line ) - 1 ] = '\0';
    //fscanf( fp, "%s\n", model_file_names + i );
    //model_file_names[ i ] = line;
    sprintf( model_file_names[ i ], "%s", line );
    //std::cout << model_file_names[ i ] << std::endl;
  }
  fclose(fp);
  search_obj.readAxis( model_file_names, dim, dim_model, ASCII_MODE_P, MULTIPLE_SIMILARITY );

  // read projection axis for feature compression
  PCA pca;
  sprintf( tmpname, "%s/models/compress_axis", argv[1] );
  pca.read( tmpname, ASCII_MODE_P );
  Eigen::MatrixXf tmpaxis = pca.getAxis();
  Eigen::MatrixXf axis = tmpaxis.block( 0,0,tmpaxis.rows(),dim );
  Eigen::MatrixXf axis_t = axis.transpose();
  Eigen::VectorXf variance = pca.getVariance();
  if( WHITENING )
    search_obj.setSceneAxis( axis_t, variance, dim );
  else
    search_obj.setSceneAxis( axis_t );

  // object detection
  VoxelizeAndDetect vad;
  vad.loop();
  ros::spin();

  return 0;
}
Code example #14
int main(int argc, char** argv)
{
	
	string filter = "Gaussian";
	string descriptor = "HAOG";
	string database = "CUFSF";
	uint count = 0;
	
	vector<string> extraPhotos, photos, sketches;
	
	loadImages(argv[1], photos);
	loadImages(argv[2], sketches);
	//loadImages(argv[7], extraPhotos);
	
	uint nPhotos = photos.size(),
	nSketches = sketches.size(),
	nExtra = extraPhotos.size();
	
	uint nTraining = 2*nPhotos/3;
	
	cout << "Read " << nSketches << " sketches." << endl;
	cout << "Read " << nPhotos + nExtra << " photos." << endl;
	
	vector<Mat*> sketchesDescriptors(nSketches), photosDescriptors(nPhotos), extraDescriptors(nExtra);
	
	Mat img, temp;
	
	int size=32, delta=16;
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nSketches; i++){
		img = imread(sketches[i],0);
		sketchesDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(sketchesDescriptors[i]) = temp.clone();
	}
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nPhotos; i++){
		img = imread(photos[i],0);
		photosDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(photosDescriptors[i]) = temp.clone();
	}
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nExtra; i++){
		img = imread(extraPhotos[i],0);
		extraDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(extraDescriptors[i]) = temp.clone();
	}
	
	auto seed = unsigned(count);
	
	srand(seed);
	random_shuffle(sketchesDescriptors.begin(), sketchesDescriptors.end());
	srand(seed);
	random_shuffle(photosDescriptors.begin(), photosDescriptors.end());
	
	//training
	vector<Mat*> trainingSketchesDescriptors, trainingPhotosDescriptors;
	
	trainingSketchesDescriptors.insert(trainingSketchesDescriptors.end(), sketchesDescriptors.begin(), sketchesDescriptors.begin()+nTraining);
	trainingPhotosDescriptors.insert(trainingPhotosDescriptors.end(), photosDescriptors.begin(), photosDescriptors.begin()+nTraining);
	
	//testing
	vector<Mat*> testingSketchesDescriptors, testingPhotosDescriptors;
	
	testingSketchesDescriptors.insert(testingSketchesDescriptors.end(), sketchesDescriptors.begin()+nTraining, sketchesDescriptors.end());
	testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), photosDescriptors.begin()+nTraining, photosDescriptors.end());
	testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), extraDescriptors.begin(), extraDescriptors.end());
	
	PCA pca;
	LDA lda;
	vector<int> labels;
	
	uint nTestingSketches = testingSketchesDescriptors.size(),
	nTestingPhotos = testingPhotosDescriptors.size();
	
	for(uint i=0; i<nTraining; i++){
		labels.push_back(i);
	}
	labels.insert(labels.end(),labels.begin(),labels.end());
	
	//bags
	vector<Mat*> testingSketchesDescriptorsBag(nTestingSketches), testingPhotosDescriptorsBag(nTestingPhotos);
	
	for(int b=0; b<200; b++){
		
		vector<int> bag_indexes = gen_bag(154, 0.1);
		
		uint dim = (bag(*(trainingPhotosDescriptors[0]), bag_indexes, 154)).total();
		
		Mat X(dim, 2*nTraining, CV_32F);
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining; i++){
			temp = *(trainingSketchesDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp.copyTo(X.col(i));
		}
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining; i++){
			temp = *(trainingPhotosDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp.copyTo(X.col(i+nTraining));
		}
		
		Mat Xs = X(Range::all(), Range(0,nTraining));
		Mat Xp = X(Range::all(), Range(nTraining,2*nTraining));
		
		Mat meanX = Mat::zeros(dim, 1, CV_32F), instance;
		Mat meanXs = Mat::zeros(dim, 1, CV_32F);
		Mat meanXp = Mat::zeros(dim, 1, CV_32F);
		
		// calculate sums
		for (int i = 0; i < X.cols; i++) {
			instance = X.col(i);
			add(meanX, instance, meanX);
		}
		
		for (int i = 0; i < Xs.cols; i++) {
			instance = Xs.col(i);
			add(meanXs, instance, meanXs);
		}
		
		for (int i = 0; i < Xp.cols; i++) {
			instance = Xp.col(i);
			add(meanXp, instance, meanXp);
		}
		
		// calculate total mean
		meanX.convertTo(meanX, CV_32F, 1.0/static_cast<double>(X.cols));
		meanXs.convertTo(meanXs, CV_32F, 1.0/static_cast<double>(Xs.cols));
		meanXp.convertTo(meanXp, CV_32F, 1.0/static_cast<double>(Xp.cols));
		
		
		// subtract the mean of matrix
		for(int i=0; i<X.cols; i++) {
			Mat c_i = X.col(i);
			subtract(c_i, meanX.reshape(1,dim), c_i);
		}
		
		for(int i=0; i<Xs.cols; i++) {
			Mat c_i = Xs.col(i);
			subtract(c_i, meanXs.reshape(1,dim), c_i);
		}
		
		for(int i=0; i<Xp.cols; i++) {
			Mat c_i = Xp.col(i);
			subtract(c_i, meanXp.reshape(1,dim), c_i);
		}
		
		if(meanX.total() >= nTraining)
			pca(X, Mat(), CV_PCA_DATA_AS_COL, nTraining-1);
		else
			pca.computeVar(X, Mat(), CV_PCA_DATA_AS_COL, .99);
		
		Mat W1 = pca.eigenvectors.t();
		Mat ldaData = (W1.t()*X).t();
		lda.compute(ldaData, labels);
		Mat W2 = lda.eigenvectors();
		W2.convertTo(W2, CV_32F);
		Mat projectionMatrix = (W2.t()*W1.t()).t();
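		// note: (W2.t()*W1.t()).t() == W1*W2, i.e. the PCA projection followed by the LDA projection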
		
		//testing
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTestingSketches; i++){
			temp = *(testingSketchesDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = projectionMatrix.t()*(temp-meanX);
			if(b==0){
				testingSketchesDescriptorsBag[i] = new Mat();
				*(testingSketchesDescriptorsBag[i]) = temp.clone();
			}
			else{
				vconcat(*(testingSketchesDescriptorsBag[i]), temp, *(testingSketchesDescriptorsBag[i]));
			}
		}
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTestingPhotos; i++){
			temp = *(testingPhotosDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = projectionMatrix.t()*(temp-meanX);
			if(b==0){
				testingPhotosDescriptorsBag[i] = new Mat();
				*(testingPhotosDescriptorsBag[i]) = temp.clone();
			}
			else{
				vconcat(*(testingPhotosDescriptorsBag[i]), temp, *(testingPhotosDescriptorsBag[i]));
			}
		}
	}
	
	Mat distancesChi = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
	Mat distancesL2 = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
	Mat distancesCosine = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
	
	#pragma omp parallel for
	for(uint i=0; i<nTestingSketches; i++){
		for(uint j=0; j<nTestingPhotos; j++){
			distancesChi.at<double>(i,j) = chiSquareDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j]));
			distancesL2.at<double>(i,j) = norm(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j]));
			distancesCosine.at<double>(i,j) = abs(1-cosineDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j])));
		}
	}
	
	string file1name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("chi") + to_string(count) + string(".xml");
	string file2name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("l2") + to_string(count) + string(".xml");
	string file3name = "kernel-drs-" + descriptor + database + to_string(nTraining) + string("cosine") + to_string(count) + string(".xml");
	
	FileStorage file1(file1name, FileStorage::WRITE);
	FileStorage file2(file2name, FileStorage::WRITE);
	FileStorage file3(file3name, FileStorage::WRITE);
	
	file1 << "distanceMatrix" << distancesChi;
	file2 << "distanceMatrix" << distancesL2;
	file3 << "distanceMatrix" << distancesCosine;
	
	file1.release();
	file2.release();
	file3.release();
	
	return 0;
}
Code example #15
File: main.cpp  Project: melias122/scratch
void
FMR_FNMR(PCA &pca, Mat &train, Mat &test, double step = 0.05, double distance = 44000.0){

    struct stat {
        double total{0}, match{0};
        double res() { return match/total; }
    };

    Mat w, r;

    cout << "Threshold;" << "FMR;" << "FNMR;" << "ROC TP;" << "ROC FP"  << endl;

    for(double t = 0.0; t <= 1.000001; t += step) {
        double min = distance * t;

        stat fmr, fnmr;

        for(int i = 0; i < 15; i++){
            for(int j = 0; j < 3; j++){

                Mat face = test.row((i*3) + j);

                pca.project(face, w);
                pca.backProject(w, r);

                for(int k = 0; k < 15; k++) {
                    if(k == i){
                        // FNMR
                        for(int l = 0; l < 8; l++) {
                            double d = norm(r, train.row((k*8) + l));
                            if(d > min){
                                fnmr.match++;
                            }
//                            else {
                                // ROC TP
//                            }
                            fnmr.total++;
                        }

                    } else {
                        // FMR
                        for(int l = 0; l < 8; l++){
                            double d = norm(r, train.row((k*8) + l));
                            if(d < min){
                                fmr.match++;

                                // ROC FP
                            }
                            fmr.total++;
                        }
                    }
                }
            }
        }

        cout << 1-t << ";"
             << fmr.res() << ";"
             << fnmr.res() << ";"
             << ((fnmr.total-fnmr.match)/fnmr.total) << ";"
             << fmr.res() << endl;
    }
}
Code example #16
	Melder_help (L"CrossCorrelationTable");
END

DIRECT (CrossCorrelationTable_to_PCA)
	LOOP {
		iam (CrossCorrelationTable);
		praat_new (SSCP_to_PCA (me), my name);
	}
END

FORM (Sound_and_PCA_projectChannels, L"Sound & PCA: To Sound (project channels)", 0)
	NATURAL (L"Number of components", L"10")
	OK
DO
	Sound me = FIRST (Sound);
	PCA thee = FIRST (PCA);
	praat_new (Sound_and_PCA_projectChannels (me, thee, GET_INTEGER (L"Number of components")), Thing_getName (me), L"_projected");
END

FORM (Sound_and_PCA_whitenChannels, L"Sound & PCA: To Sound (whiten channels)", 0)
	NATURAL (L"Number of components", L"10")
	OK
DO
	Sound me = FIRST (Sound);
	PCA thee = FIRST (PCA);
	praat_new (Sound_and_PCA_whitenChannels (me, thee, GET_INTEGER (L"Number of components")), Thing_getName (me), L"_white");
END

DIRECT (CrossCorrelationTable_to_CrossCorrelationTables)
	autoCrossCorrelationTables thee = CrossCorrelationTables_create ();
	long nrows = 0, ncols = 0, nselected = 0;
Code example #17
    void run(int)
    {
        const Size sz(200, 500);

        double diffPrjEps, diffBackPrjEps,
        prjEps, backPrjEps,
        evalEps, evecEps;
        int maxComponents = 100;
        double retainedVariance = 0.95;
        Mat rPoints(sz, CV_32FC1), rTestPoints(sz, CV_32FC1);
        RNG& rng = ts->get_rng();

        rng.fill( rPoints, RNG::UNIFORM, Scalar::all(0.0), Scalar::all(1.0) );
        rng.fill( rTestPoints, RNG::UNIFORM, Scalar::all(0.0), Scalar::all(1.0) );

        PCA rPCA( rPoints, Mat(), CV_PCA_DATA_AS_ROW, maxComponents ), cPCA;

        // 1. check C++ PCA & ROW
        Mat rPrjTestPoints = rPCA.project( rTestPoints );
        Mat rBackPrjTestPoints = rPCA.backProject( rPrjTestPoints );

        Mat avg(1, sz.width, CV_32FC1 );
        reduce( rPoints, avg, 0, CV_REDUCE_AVG );
        Mat Q = rPoints - repeat( avg, rPoints.rows, 1 ), Qt = Q.t(), eval, evec;
        Q = Qt * Q;
        Q = Q /(float)rPoints.rows;

        eigen( Q, eval, evec );
        /*SVD svd(Q);
         evec = svd.vt;
         eval = svd.w;*/

        Mat subEval( maxComponents, 1, eval.type(), eval.data ),
        subEvec( maxComponents, evec.cols, evec.type(), evec.data );

    #ifdef CHECK_C
        Mat prjTestPoints, backPrjTestPoints, cPoints = rPoints.t(), cTestPoints = rTestPoints.t();
        CvMat _points, _testPoints, _avg, _eval, _evec, _prjTestPoints, _backPrjTestPoints;
    #endif

        // check eigen()
        double eigenEps = 1e-6;
        double err;
        for(int i = 0; i < Q.rows; i++ )
        {
            Mat v = evec.row(i).t();
            Mat Qv = Q * v;

            Mat lv = eval.at<float>(i,0) * v;
            err = cvtest::norm( Qv, lv, NORM_L2 );
            if( err > eigenEps )
            {
                ts->printf( cvtest::TS::LOG, "bad accuracy of eigen(); err = %f\n", err );
                ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
                return;
            }
        }
        // check pca eigenvalues
        evalEps = 1e-6, evecEps = 1e-3;
        err = cvtest::norm( rPCA.eigenvalues, subEval, NORM_L2 );
        if( err > evalEps )
        {
            ts->printf( cvtest::TS::LOG, "pca.eigenvalues is incorrect (CV_PCA_DATA_AS_ROW); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }
        // check pca eigenvectors
        for(int i = 0; i < subEvec.rows; i++)
        {
            Mat r0 = rPCA.eigenvectors.row(i);
            Mat r1 = subEvec.row(i);
            err = cvtest::norm( r0, r1, CV_L2 );
            if( err > evecEps )
            {
                r1 *= -1;
                double err2 = cvtest::norm(r0, r1, CV_L2);
                if( err2 > evecEps )
                {
                    Mat tmp;
                    absdiff(rPCA.eigenvectors, subEvec, tmp);
                    double mval = 0; Point mloc;
                    minMaxLoc(tmp, 0, &mval, 0, &mloc);

                    ts->printf( cvtest::TS::LOG, "pca.eigenvectors is incorrect (CV_PCA_DATA_AS_ROW); err = %f\n", err );
                    ts->printf( cvtest::TS::LOG, "max diff is %g at (i=%d, j=%d) (%g vs %g)\n",
                               mval, mloc.y, mloc.x, rPCA.eigenvectors.at<float>(mloc.y, mloc.x),
                               subEvec.at<float>(mloc.y, mloc.x));
                    ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
                    return;
                }
            }
        }

        prjEps = 1.265, backPrjEps = 1.265;
        for( int i = 0; i < rTestPoints.rows; i++ )
        {
            // check pca project
            Mat subEvec_t = subEvec.t();
            Mat prj = rTestPoints.row(i) - avg; prj *= subEvec_t;
            err = cvtest::norm(rPrjTestPoints.row(i), prj, CV_RELATIVE_L2);
            if( err > prjEps )
            {
                ts->printf( cvtest::TS::LOG, "bad accuracy of project() (CV_PCA_DATA_AS_ROW); err = %f\n", err );
                ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
                return;
            }
            // check pca backProject
            Mat backPrj = rPrjTestPoints.row(i) * subEvec + avg;
            err = cvtest::norm( rBackPrjTestPoints.row(i), backPrj, CV_RELATIVE_L2 );
            if( err > backPrjEps )
            {
                ts->printf( cvtest::TS::LOG, "bad accuracy of backProject() (CV_PCA_DATA_AS_ROW); err = %f\n", err );
                ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
                return;
            }
        }

        // 2. check C++ PCA & COL
        cPCA( rPoints.t(), Mat(), CV_PCA_DATA_AS_COL, maxComponents );
        diffPrjEps = 1, diffBackPrjEps = 1;
        Mat ocvPrjTestPoints = cPCA.project(rTestPoints.t());
        err = cvtest::norm(cv::abs(ocvPrjTestPoints), cv::abs(rPrjTestPoints.t()), CV_RELATIVE_L2 );
        if( err > diffPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of project() (CV_PCA_DATA_AS_COL); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }
        err = cvtest::norm(cPCA.backProject(ocvPrjTestPoints), rBackPrjTestPoints.t(), CV_RELATIVE_L2 );
        if( err > diffBackPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of backProject() (CV_PCA_DATA_AS_COL); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }

        // 3. check C++ PCA w/retainedVariance
        cPCA( rPoints.t(), Mat(), CV_PCA_DATA_AS_COL, retainedVariance );
        diffPrjEps = 1, diffBackPrjEps = 1;
        Mat rvPrjTestPoints = cPCA.project(rTestPoints.t());

        if( cPCA.eigenvectors.rows > maxComponents)
            err = cvtest::norm(cv::abs(rvPrjTestPoints.rowRange(0,maxComponents)), cv::abs(rPrjTestPoints.t()), CV_RELATIVE_L2 );
        else
            err = cvtest::norm(cv::abs(rvPrjTestPoints), cv::abs(rPrjTestPoints.colRange(0,cPCA.eigenvectors.rows).t()), CV_RELATIVE_L2 );

        if( err > diffPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of project() (CV_PCA_DATA_AS_COL); retainedVariance=0.95; err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }
        err = cvtest::norm(cPCA.backProject(rvPrjTestPoints), rBackPrjTestPoints.t(), CV_RELATIVE_L2 );
        if( err > diffBackPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of backProject() (CV_PCA_DATA_AS_COL); retainedVariance=0.95; err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }

    #ifdef CHECK_C
        // 4. check C PCA & ROW
        _points = rPoints;
        _testPoints = rTestPoints;
        _avg = avg;
        _eval = eval;
        _evec = evec;
        prjTestPoints.create(rTestPoints.rows, maxComponents, rTestPoints.type() );
        backPrjTestPoints.create(rPoints.size(), rPoints.type() );
        _prjTestPoints = prjTestPoints;
        _backPrjTestPoints = backPrjTestPoints;

        cvCalcPCA( &_points, &_avg, &_eval, &_evec, CV_PCA_DATA_AS_ROW );
        cvProjectPCA( &_testPoints, &_avg, &_evec, &_prjTestPoints );
        cvBackProjectPCA( &_prjTestPoints, &_avg, &_evec, &_backPrjTestPoints );

        err = cvtest::norm(prjTestPoints, rPrjTestPoints, CV_RELATIVE_L2);
        if( err > diffPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of cvProjectPCA() (CV_PCA_DATA_AS_ROW); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }
        err = cvtest::norm(backPrjTestPoints, rBackPrjTestPoints, CV_RELATIVE_L2);
        if( err > diffBackPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of cvBackProjectPCA() (CV_PCA_DATA_AS_ROW); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }

        // 5. check C PCA & COL
        _points = cPoints;
        _testPoints = cTestPoints;
        avg = avg.t(); _avg = avg;
        eval = eval.t(); _eval = eval;
        evec = evec.t(); _evec = evec;
        prjTestPoints = prjTestPoints.t(); _prjTestPoints = prjTestPoints;
        backPrjTestPoints = backPrjTestPoints.t(); _backPrjTestPoints = backPrjTestPoints;

        cvCalcPCA( &_points, &_avg, &_eval, &_evec, CV_PCA_DATA_AS_COL );
        cvProjectPCA( &_testPoints, &_avg, &_evec, &_prjTestPoints );
        cvBackProjectPCA( &_prjTestPoints, &_avg, &_evec, &_backPrjTestPoints );

        err = cvtest::norm(cv::abs(prjTestPoints), cv::abs(rPrjTestPoints.t()), CV_RELATIVE_L2 );
        if( err > diffPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of cvProjectPCA() (CV_PCA_DATA_AS_COL); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }
        err = cvtest::norm(backPrjTestPoints, rBackPrjTestPoints.t(), CV_RELATIVE_L2);
        if( err > diffBackPrjEps )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of cvBackProjectPCA() (CV_PCA_DATA_AS_COL); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
            return;
        }
    #endif
        // Test read and write
        FileStorage fs( "PCA_store.yml", FileStorage::WRITE );
        rPCA.write( fs );
        fs.release();

        PCA lPCA;
        fs.open( "PCA_store.yml", FileStorage::READ );
        lPCA.read( fs.root() );
        err = cvtest::norm( rPCA.eigenvectors, lPCA.eigenvectors, CV_RELATIVE_L2 );
        if( err > 0 )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of write/load functions (YML); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
        }
        err = cvtest::norm( rPCA.eigenvalues, lPCA.eigenvalues, CV_RELATIVE_L2 );
        if( err > 0 )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of write/load functions (YML); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
        }
        err = cvtest::norm( rPCA.mean, lPCA.mean, CV_RELATIVE_L2 );
        if( err > 0 )
        {
            ts->printf( cvtest::TS::LOG, "bad accuracy of write/load functions (YML); err = %f\n", err );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
        }
    }
Code example #18
File: eigenfacer.cpp  Project: 418231020/fire-cbir
int main(int argc , char **argv) {
  
  GetPot cl(argc,argv);
  
  if(cl.search("-h")) {USAGE(); exit(0);}

  string  line;
  if(cl.search("-E")) {
    cout << "Estimating PCA" << endl;
    
    ifstream filelist(cl.follow("filelist","-E"));
    ImageFeature img;    
    getline(filelist,line);

    img.load(line,true);
    cout << line << endl;
    
    PCA pca(img.size());
    cout << img.size() << endl;
    
    pca.putData(img.layerVector(0));
    
    while(getline(filelist,line)) {
      cout << line << endl;
      img.load(line,true);
      pca.putData(img.layerVector(0));
    }
    pca.dataEnd();
    pca.save(cl.follow("covariance.pca","-c"));
    pca.calcPCA();
    pca.save(cl.follow("transformation.pca","-t"));
    filelist.close();
    
  } else if(cl.search("-T")) {
    PCA pca;
    pca.load(cl.follow("transformation.pca","-t"));
    int dim=cl.follow(20,"-d");
    
    ifstream filelist(cl.follow("filelist","-T"));
    vector<double> tmp;
    ImageFeature img;
    while(getline(filelist,line)) {
      cout << line << endl;
      img.load(line,true);
      tmp=pca.transform(img.layerVector(0),img.size());
      tmp.resize(dim);
      VectorFeature tmpvec(tmp);
      tmpvec.save(line+".pca.vec.gz");
    }
    filelist.close();
  } else if(cl.search("-B")) {
    PCA pca;
    pca.load(cl.follow("transformation.pca","-t"));
    int x=cl.follow(16,"-x");
    int y=cl.follow(16,"-y");
    ifstream filelist(cl.follow("filelist","-B"));
    vector<double> tmp;
    VectorFeature tmpvec;
    while(getline(filelist,line)) {
      cout << line << endl;
      tmpvec.load(line);
      vector<double> tmp;
      tmp=pca.backTransform(tmpvec.data());
      ImageFeature img(tmp,x,y);
      cutoff(img);
      img.save(line+".backpca.png");
    }
    filelist.close();
  } else if(cl.search("-M")) {
    ImageFeature img; img.load(cl.follow("image.png","-M"),true);
    PCA pca;
    vector<double> backproj,vec;
    pca.load(cl.follow("transformation.pca","-t"));
    int w=cl.follow(16,"-x");
    int h=cl.follow(16,"-y");
    double scalefac=cl.follow(0.8333,"-s");
    int dim=cl.follow(20,"-d");
    
    ImageFeature scimg(img);
    ImageFeature patch;
    uint minX=100000, minY=100000; 
    uint maxX=100000, maxY=100000; 
   
    while(int(scimg.xsize())>=w and int(scimg.ysize())>=h) {
      DBG(10) << VAR(scimg.xsize()) << " x " << VAR(scimg.ysize()) << endl;
      ImageFeature faceprobmap(scimg.xsize(),scimg.ysize(),1);
      double maxDist=0.0;
      double minDist=numeric_limits<double>::max();
      
      vector<double> tmpvec;

      for(uint x=0;x<scimg.xsize();++x) {
        DBG(10) << VAR(x) << endl;
        for(uint y=0;y<scimg.ysize();++y) {
          patch=getPatch(scimg,x,y,x+w,y+h);
          vec=patch.layerVector(0);
          
          vector<double> imgMinMean=vec;
          for(uint i=0;i<imgMinMean.size();++i) {
            imgMinMean[i]-=pca.mean()[i];
          }
          double energyImg=getEnergy(imgMinMean);
          
          tmpvec=pca.transform(vec,dim);
          double energyTrans=getEnergy(tmpvec);
          backproj=pca.backTransform(tmpvec);
          
          double energyBack=getEnergy(backproj);

          double d=0;
          double tmp;
          for(uint i=0;i<backproj.size();++i) {
            tmp=backproj[i]-vec[i];
            d+=tmp*tmp;
          }
          faceprobmap(x,y,0)=d;
          
          DBG(10) << VAR(energyImg) << " "
                  << VAR(energyTrans) << " " 
                  << VAR(energyBack) << " "
                  << VAR(energyImg-energyTrans) << " "
                  << VAR(d) << endl;


          if(minDist>d) {
            minDist=d;
            minX=x; minY=y;
          }
          if(maxDist<d) {
            maxDist=d;
            maxX=x; maxY=y;
          }
        }
      }
      
      DBG(10) << VAR(scimg.xsize()) << " " << VAR(scimg.ysize()) << endl;
      DBG(10) << VAR(minDist) << " " << VAR(minX) << " " << VAR(minY) << endl;
      DBG(10) << VAR(maxDist) << " " << VAR(maxX) << " " << VAR(maxY) << endl << endl;

      normalize(faceprobmap);
      ostringstream filenamestream;
      filenamestream << cl.follow("image.png","-M") << ".fpm." << (scimg.xsize()) <<".png";
      faceprobmap.save(filenamestream.str());
      
      uint newW=int(scimg.xsize()*scalefac);
      uint newH=int(scimg.ysize()*scalefac);
      scimg=scale(scimg,newW,newH);
    }
    
  } else if(cl.search("-F")) {
#ifdef HAVE_FFT_LIBRARY
    ImageFeature img; img.load(cl.follow("image.png","-F"),true);
    PCA pca; pca.load(cl.follow("transformation.pca","-t"));
    int w=cl.follow(16,"-x");
    int h=cl.follow(16,"-y");
    double scalefac=cl.follow(0.8333,"-s");
    uint dim=cl.follow(20,"-d");

    DBG(10) << "Eigenfaces loaded" << endl;

    
    ImageFeature scimg=img;
    while(int(scimg.xsize())>w and int(scimg.ysize())>h) {
      ImageFeature fpm=detect(scimg,pca,dim,w,h);

      pair<uint,uint> p;

      p=argmax(fpm);
      
      DBG(10) << scimg.xsize() << "x" << scimg.ysize() << " (" <<p.first<<", "<< p.second << ") " << VAR(maximum(fpm)) ;

      p=argmin(fpm);
      BLINK(10) << " (" <<p.first<<", "<< p.second << ") " << VAR(minimum(fpm)) << endl;

      normalize(fpm);
      
      ostringstream filenamestream;
      filenamestream << cl.follow("image.png","-F") << ".fpm." << (scimg.xsize()) <<".png";
      fpm.save(filenamestream.str());
      
      scimg=scale(scimg,int(scimg.xsize()*scalefac),int(scimg.ysize()*scalefac));
    }
  
    

#else
    DBG(10) << "compiled without FFT lib. this does not work. use -M option" << endl;
#endif
    
  } else {
    USAGE();
    exit(20);
  }
}
Code example #19
File: praat_BSS_init.cpp  Project: READSEARCH/praat
	double startTime = GET_REAL (U"left Time range"), endTime = GET_REAL (U"right Time range");
	const char32 *channelRanges = GET_STRING (U"Channel ranges");
	bool useCorrelation = GET_INTEGER (U"Use") == 2;
	LOOP {
		iam (EEG);
		autoPCA pca = EEG_to_PCA (me, startTime, endTime, channelRanges, useCorrelation);
		praat_new (pca.move(), my name);
	}
END

FORM (EEG_and_PCA_to_EEG_principalComponents, U"EEG & PCA: To EEG (principal components)", U"EEG & PCA: To EEG (principal components)...")
	INTEGER (U"Number of components", U"0 (=all)")
	OK
DO
	EEG me = FIRST (EEG);
	PCA thee = FIRST (PCA);
	autoEEG him = EEG_and_PCA_to_EEG_principalComponents (me, thee, GET_INTEGER (U"Number of components"));
	praat_new (him.move(), my name, U"_pc");
END

FORM (EEG_and_PCA_to_EEG_whiten, U"EEG & PCA: To EEG (whiten)", U"EEG & PCA: To EEG (whiten)...")
	INTEGER (U"Number of components", U"0 (=all)")
	OK
DO
	EEG me = FIRST (EEG);
	PCA thee = FIRST (PCA);
	autoEEG him = EEG_and_PCA_to_EEG_whiten (me, thee, GET_INTEGER (U"Number of components"));
	praat_new (him.move(), my name, U"_white");
END

FORM (EEG_to_Sound_modulated, U"EEG: To Sound (modulated)", 0)
Code example #20
int main(int argc, char** argv)
{
	string filter = "Gaussian";
	string descriptor = "SIFT";
	string database = "Forensic-extra";
	uint count = 1;
	
	vector<string> extraPhotos, photos, sketches;
	
	loadImages(argv[5], photos);
	loadImages(argv[6], sketches);
	loadImages(argv[7], extraPhotos);
	
	uint nPhotos = photos.size(),
	nSketches = sketches.size(),
	nExtra = extraPhotos.size();
	
	uint nTraining = 2*nPhotos/3;
	
	cout << "Read " << nSketches << " sketches." << endl;
	cout << "Read " << nPhotos + nExtra << " photos." << endl;
	
	vector<Mat*> sketchesDescriptors(nSketches), photosDescriptors(nPhotos), extraDescriptors(nExtra);
	
	Mat img, temp;
	
	int size=32, delta=16;
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nSketches; i++){
		img = imread(sketches[i],0);
		sketchesDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(sketchesDescriptors[i]) = temp.clone();
	}
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nPhotos; i++){
		img = imread(photos[i],0);
		photosDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(photosDescriptors[i]) = temp.clone();
	}
	
	#pragma omp parallel for private(img, temp)
	for(uint i=0; i<nExtra; i++){
		img = imread(extraPhotos[i],0);
		extraDescriptors[i] = new Mat();
		
		#pragma omp critical
		temp = extractDescriptors(img, size, delta, filter, descriptor);
		
		*(extraDescriptors[i]) = temp.clone();
	}
	
	auto seed = unsigned(count);
	
	srand(seed);
	random_shuffle(sketchesDescriptors.begin(), sketchesDescriptors.end());
	srand(seed);
	random_shuffle(photosDescriptors.begin(), photosDescriptors.end());
	
	//training
	vector<Mat*> trainingSketchesDescriptors1, trainingPhotosDescriptors1, 
	trainingSketchesDescriptors2, trainingPhotosDescriptors2;
	
	trainingSketchesDescriptors1.insert(trainingSketchesDescriptors1.end(), sketchesDescriptors.begin(), sketchesDescriptors.begin()+nTraining/2);
	trainingPhotosDescriptors1.insert(trainingPhotosDescriptors1.end(), photosDescriptors.begin(), photosDescriptors.begin()+nTraining/2);
	trainingSketchesDescriptors2.insert(trainingSketchesDescriptors2.end(), sketchesDescriptors.begin()+nTraining/2, sketchesDescriptors.begin()+nTraining);
	trainingPhotosDescriptors2.insert(trainingPhotosDescriptors2.end(), photosDescriptors.begin()+nTraining/2, photosDescriptors.begin()+nTraining);
	
	uint nTraining1 = trainingPhotosDescriptors1.size(),
	nTraining2 = trainingPhotosDescriptors2.size();
	
	//testing
	vector<Mat*> testingSketchesDescriptors, testingPhotosDescriptors;
	
	testingSketchesDescriptors.insert(testingSketchesDescriptors.end(), sketchesDescriptors.begin()+nTraining, sketchesDescriptors.end());
	testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), photosDescriptors.begin()+nTraining, photosDescriptors.end());
	testingPhotosDescriptors.insert(testingPhotosDescriptors.end(), extraDescriptors.begin(), extraDescriptors.end());
	
	uint nTestingSketches = testingSketchesDescriptors.size(),
	nTestingPhotos = testingPhotosDescriptors.size();
	
	PCA pca;
	LDA lda;
	vector<int> labels;
	
	for(uint i=0; i<nTraining2; i++){
		labels.push_back(i);
	}
	labels.insert(labels.end(),labels.begin(),labels.end());
	
	//bags
	vector<Mat*> testingSketchesDescriptorsBag(nTestingSketches), testingPhotosDescriptorsBag(nTestingPhotos), 
	trainingPhotosDescriptors1Temp(nTraining1), trainingSketchesDescriptors1Temp(nTraining1);
	
	for(int b=0; b<30; b++){
		
		vector<int> bag_indexes = gen_bag(154, 0.1);
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining1; i++){
			temp = *(trainingSketchesDescriptors1[i]);
			temp = bag(temp, bag_indexes, 154);
			trainingSketchesDescriptors1Temp[i] = new Mat();
			*(trainingSketchesDescriptors1Temp[i]) = temp.clone();
		}
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining1; i++){
			temp = *(trainingPhotosDescriptors1[i]);
			temp = bag(temp, bag_indexes, 154);
			trainingPhotosDescriptors1Temp[i] = new Mat();
			*(trainingPhotosDescriptors1Temp[i]) = temp.clone();
		}
		
		Kernel k(trainingPhotosDescriptors1Temp, trainingSketchesDescriptors1Temp);
		k.compute();
		
		uint dim = (k.projectGallery(bag(*(trainingPhotosDescriptors1[0]), bag_indexes, 154))).total();
		
		Mat X(dim, 2*nTraining2, CV_32F);
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining2; i++){
			temp = *(trainingSketchesDescriptors2[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = k.projectProbe(temp);
			temp.copyTo(X.col(i));
		}
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTraining2; i++){
			temp = *(trainingPhotosDescriptors2[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = k.projectGallery(temp);
			temp.copyTo(X.col(i+nTraining2));
		}
		
		Mat meanX = Mat::zeros(dim, 1, CV_32F), instance;
		
		// calculate sums
		for (int i = 0; i < X.cols; i++) {
			instance = X.col(i);
			add(meanX, instance, meanX);
		}
		
		// calculate total mean
		meanX.convertTo(meanX, CV_32F, 1.0/static_cast<double>(X.cols));
		
		// subtract the mean of matrix
		for(int i=0; i<X.cols; i++) {
			Mat c_i = X.col(i);
			subtract(c_i, meanX.reshape(1,dim), c_i);
		}
		
		pca.computeVar(X, Mat(), CV_PCA_DATA_AS_COL, .99);
		
		Mat W1 = pca.eigenvectors.t();
		Mat ldaData = (W1.t()*X).t();
		lda.compute(ldaData, labels);
		Mat W2 = lda.eigenvectors();
		W2.convertTo(W2, CV_32F);
		Mat projectionMatrix = (W2.t()*W1.t()).t();
		
		//testing
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTestingSketches; i++){
			temp = *(testingSketchesDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = k.projectProbe(temp);
			temp = projectionMatrix.t()*(temp-meanX);
			if(b==0){
				testingSketchesDescriptorsBag[i] = new Mat();
				*(testingSketchesDescriptorsBag[i]) = temp.clone();
			}
			else{
				vconcat(*(testingSketchesDescriptorsBag[i]), temp, *(testingSketchesDescriptorsBag[i]));
			}
		}
		
		#pragma omp parallel for private(temp)
		for(uint i=0; i<nTestingPhotos; i++){
			temp = *(testingPhotosDescriptors[i]);
			temp = bag(temp, bag_indexes, 154);
			temp = k.projectGallery(temp);
			temp = projectionMatrix.t()*(temp-meanX);
			if(b==0){
				testingPhotosDescriptorsBag[i] = new Mat();
				*(testingPhotosDescriptorsBag[i]) = temp.clone();
			}
			else{
				vconcat(*(testingPhotosDescriptorsBag[i]), temp, *(testingPhotosDescriptorsBag[i]));
			}
		}
	}
	
	Mat distancesCosine = Mat::zeros(nTestingSketches,nTestingPhotos,CV_64F);
	
	#pragma omp parallel for
	for(uint i=0; i<nTestingSketches; i++){
		for(uint j=0; j<nTestingPhotos; j++){
			distancesCosine.at<double>(i,j) = abs(1-cosineDistance(*(testingSketchesDescriptorsBag[i]),*(testingPhotosDescriptorsBag[j])));
		}
	}
	
	string file1name = "kernel-prs-" + filter + descriptor + database + to_string(nTraining) + string("cosine") + to_string(count) + string(".xml");
	
	FileStorage file1(file1name, FileStorage::WRITE);
	
	file1 << "distanceMatrix" << distancesCosine;
	
	file1.release();
	
	return 0;
}
Code example #21
void LocalFeatureExtractor::pcaTransform(const PCA &pca, LocalFeatures &lf) {
  for(uint l=0;l<lf.numberOfFeatures_;++l) {
    lf[l]=pca.transform(lf[l],settings_.pcadim);
  }
  lf.dim_=settings_.pcadim;
}
Code example #22
File: eigenfacer.cpp  Project: 418231020/fire-cbir
ImageFeature detect(const ImageFeature& img, const PCA& pca,uint dim, uint w, uint h) {
  uint imgdim=img.xsize()*img.ysize();
  uint padx=img.xsize()+w; uint pady=img.ysize()+h;
  uint paddim=padx*pady;
  
#ifdef HAVE_FFT_LIBRARY

  // get memory for calculations
  fftw_complex *FIMG=NULL, **FFILTER=NULL, **FMULT=NULL;
  FIMG=new fftw_complex[paddim]; 
  for(uint i=0;i<paddim;++i) {FIMG[i].re=0.0;FIMG[i].im=0.0;}
  FFILTER=new fftw_complex*[dim];
  FMULT=new fftw_complex*[dim];
  for(uint i=0;i<dim;++i) {
    FFILTER[i]=new fftw_complex[paddim];
    for(uint j=0;j<paddim;++j) {FFILTER[i][j].re=0.0;FFILTER[i][j].im=0.0;}  // zero the whole padded buffer, not just imgdim
    
    FMULT[i]=new fftw_complex[paddim];
    for(uint j=0;j<paddim;++j) {FMULT[i][j].re=0.0;FMULT[i][j].im=0.0;}
  }
  
  DBG(10) << "Memory allocated" << endl;
  
  vector<double> meanTrans=pca.transform(pca.mean(),dim);
  
  // create strategy for fft
  fftwnd_plan plan = fftw2d_create_plan(padx,pady, FFTW_FORWARD, FFTW_ESTIMATE  | FFTW_IN_PLACE); 
  fftwnd_plan planb = fftw2d_create_plan(padx,pady, FFTW_BACKWARD, FFTW_ESTIMATE | FFTW_IN_PLACE); 
  
  DBG(10) << "Strategies for FFT created" << endl;
  
  //copy image into fourier transform data structure
  for(uint x=0;x<img.xsize();++x) { for(uint y=0;y<img.ysize();++y) { FIMG[y*padx+x].re=img(x,y,0); } }
  
  // fourier transform the image
  fftwnd_one(plan,FIMG,NULL);
  
  DBG(10) << "Image Transformed" << endl;
  
  
  // fourier transform the filters
  for(uint d=0;d<dim;++d) {
    for(uint x=0;x<w;++x) {
      for(uint y=0;y<h;++y) {
        uint i=y*padx+x;
        FFILTER[d][i].re=pca.eigenvector(d)[y*w+x];
      }
    }
    fftwnd_one(plan,FFILTER[d],NULL);
    DBG(10) << "Filter " << d << " transformed." << endl;
  }
  
  // multiplication in fourier domain
  for(uint d=0;d<dim;++d) {
    for(uint i=0;i<paddim;++i) {
      FMULT[d][i].re=FIMG[i].re*FFILTER[d][i].re-FIMG[i].im*FFILTER[d][i].im;
      FMULT[d][i].im=FIMG[i].re*FFILTER[d][i].im+FIMG[i].im*FFILTER[d][i].re;
    }
    DBG(10) << "Filter " << d << " applied." ;
    //fourier back transform
    fftwnd_one(planb,FMULT[d],NULL);
    BLINK(10) << "... backtransformed.." ;
    
    
    // subtract transformed mean
    for(uint i=0;i<paddim;++i) {
      FMULT[d][i].re=FMULT[d][i].re/paddim-meanTrans[d];
    }
    BLINK(10) << ". Mean subtracted." << endl;
  }
  
  
  double energyTrans, energyImg;
  
  ImageFeature fpm(img.xsize(),img.ysize(),1);
  for(uint x=0;x<img.xsize();++x) {
    for(uint y=0;y<img.ysize();++y) {
      uint i=y*padx+x;
      energyTrans=energTrans(FMULT,i,dim);
      energyImg=energImg(img,x,y,w,h,pca.mean());
      DBG(25) << VAR(x) << " " << VAR(y) << " " << VAR(energyTrans) << " " << VAR(energyImg) << endl;
      fpm(x,y,0)=energyImg-energyTrans;
    }
  }

  DBG(10) << "fpm generation." << endl;
  return fpm;
  #else
  // without an FFT library the probability map cannot be computed; return an
  // empty map of the right size so callers can detect the failure
  return ImageFeature(img.xsize(), img.ysize(), 1);
  #endif

}
Code example #23
//********************************
//* main
int main(int argc, char* argv[]) {
  if( (argc != 12) && (argc != 14) ){
    std::cerr << "usage: " << argv[0] << " [path] <rank_num> <exist_voxel_num_threshold> [model_pca_filename] <dim_model> <size1> <size2> <size3> <detect_th> <distance_th> /input:=/camera/rgb/points" << std::endl;
    exit( EXIT_FAILURE );
  }
  char tmpname[ 1000 ];
  ros::init (argc, argv, "detectObj", ros::init_options::AnonymousName);

  // read the length of voxel side
  sprintf( tmpname, "%s/param/parameters.txt", argv[1] );
  voxel_size = Param::readVoxelSize( tmpname );

  detect_th = atof( argv[9] );
  distance_th = atof( argv[10] );
  rank_num = atoi( argv[2] );

  // read the number of voxels in each subdivision's side of scene
  box_size = Param::readBoxSizeScene( tmpname );

  // read the dimension of compressed feature vectors
  dim = Param::readDim( tmpname );

  // set the dimension of the target object's subspace
  const int dim_model = atoi(argv[5]);
  if( dim <= dim_model ){
    std::cerr << "ERR: dim_model should be less than dim(in dim.txt)" << std::endl;
    exit( EXIT_FAILURE );
  }

  // read the threshold for RGB binalize
  sprintf( tmpname, "%s/param/color_threshold.txt", argv[1] );
  Param::readColorThreshold( color_threshold_r, color_threshold_g, color_threshold_b, tmpname );

  // determine the size of sliding box
  region_size = box_size * voxel_size;
  float tmp_val = atof(argv[6]) / region_size;
  int size1 = (int)tmp_val;
  if( ( ( tmp_val - size1 ) >= 0.5 ) || ( size1 == 0 ) ) size1++;
  tmp_val = atof(argv[7]) / region_size;
  int size2 = (int)tmp_val;
  if( ( ( tmp_val - size2 ) >= 0.5 ) || ( size2 == 0 ) ) size2++;
  tmp_val = atof(argv[8]) / region_size;
  int size3 = (int)tmp_val;
  if( ( ( tmp_val - size3 ) >= 0.5 ) || ( size3 == 0 ) ) size3++;
  sliding_box_size_x = size1 * region_size;
  sliding_box_size_y = size2 * region_size;
  sliding_box_size_z = size3 * region_size;

  // set variables
  search_obj.setRange( size1, size2, size3 );
  search_obj.setRank( rank_num );
  search_obj.setThreshold( atoi(argv[3]) );
  search_obj.readAxis( argv[4], dim, dim_model, ASCII_MODE_P, MULTIPLE_SIMILARITY );

  // read projection axis of the target object's subspace
  PCA pca;
  sprintf( tmpname, "%s/models/compress_axis", argv[1] );
  pca.read( tmpname, ASCII_MODE_P );
  Eigen::MatrixXf tmpaxis = pca.getAxis();
  Eigen::MatrixXf axis = tmpaxis.block( 0,0,tmpaxis.rows(),dim );
  Eigen::MatrixXf axis_t = axis.transpose();
  Eigen::VectorXf variance = pca.getVariance();
  if( WHITENING )
    search_obj.setSceneAxis( axis_t, variance, dim );
  else
    search_obj.setSceneAxis( axis_t );

  // object detection
  VoxelizeAndDetect vad;
  vad.loop();
  ros::spin();

  return 0;
}
Code example #24
void test(set<int> &testSet, int code)
{
	CascadeClassifier classifier;
	classifier.load("haarcascades/haarcascade_frontalface_alt_tree.xml");

	ShapePredictor predictor;
	predictor.load("model/helen.txt");

	PCA pca;
	FileStorage fs("model/girl_pca.xml", FileStorage::READ);
	fs["mean"] >> pca.mean;
	fs["eigenvals"] >> pca.eigenvalues;
	fs["eigenvecs"] >> pca.eigenvectors;

	SVM svm;
	svm.load("model/girl_svm.xml");

	cout << "\nmodel loaded" << endl;

	ifstream fin("img/labels.txt");
	ofstream fout("data/out_" + 
				  to_string(code) + ".txt");
	VideoWriter writer("data/out.avi", 0, 10, Size(1920, 1080), true);

	string line;
	int corr = 0, total = 0;
	while (getline(fin, line)) {
		stringstream ss(line);
		int frame, label;
		ss >> frame >> label;
		label -= 49;

		if (testSet.find(frame) == testSet.end())
			continue;

		Mat vis = imread("img/" + to_string(frame) + ".jpg",
						 CV_LOAD_IMAGE_UNCHANGED);
		Mat_<uchar> img;
		cvtColor(vis, img, COLOR_BGR2GRAY);
		BBox bbox = getTestBBox(img, classifier);
		if (EmptyBox(bbox)) continue;

		Mat_<double> shape = predictor(img, bbox);
		Geom G;	initGeom(shape, G);
		Pose P; calcPose(G, P);

		Mat_<uchar> lEye, rEye;
		regularize(img, bbox, P, shape, lEye, rEye);

		vector<float> lRlt;
		vector<float> rRlt;
		calcMultiHog(lEye, lRlt);
		calcMultiHog(rEye, rRlt);

		Mat_<float> pcaVec, ldmks;

		vector<float> _hog2nd_vec;
		for (int k = 0; k < lRlt.size(); k++)
			_hog2nd_vec.push_back(lRlt[k]);
		for (int k = 0; k < rRlt.size(); k++)
			_hog2nd_vec.push_back(rRlt[k]);
		Mat_<float> multihog = Mat_<float>(_hog2nd_vec).reshape(1, 1);
		pcaVec = pca.project(multihog);

		vector<float> _ldmks;
		for (int i = 28; i < 48; i++) {
			_ldmks.push_back((shape(i, 0) - bbox.cx) / bbox.w);
			_ldmks.push_back((shape(i, 1) - bbox.cy) / bbox.h);
		}
		float mouthx = (shape(51, 0) + shape(62, 0) + shape(66, 0) + shape(57, 0)) / 4;
		float mouthy = (shape(51, 1) + shape(62, 1) + shape(66, 1) + shape(57, 1)) / 4;
		_ldmks.push_back((mouthx - bbox.cx) / bbox.w);
		_ldmks.push_back((mouthy - bbox.cy) / bbox.h);
		float maxVal = *std::max_element(_ldmks.begin(), _ldmks.end());
		for (size_t i = 0; i < _ldmks.size(); i++) _ldmks[i] /= maxVal; // normalize so the largest value becomes 1
		ldmks = Mat_<float>(_ldmks).reshape(1, 1);

		Mat_<float> sample(1, pcaVec.cols + ldmks.cols);
		for (int j = 0; j < pcaVec.cols; j++)
			sample(0, j) = pcaVec(0, j);
		for (int j = 0; j < ldmks.cols; j++)
			sample(0, j + pcaVec.cols) = ldmks(0, j);

		int pred = svm.predict(sample);
		if (pred == label) corr++;
		total++;

		fout << frame << ' ' << label << ' ' << pred << endl;

		string s1, s2;
		switch (label) {
		case 0: s1 = "annotation: Eye"; break;
		case 1: s1 = "annotation: Face"; break;
		case 2: s1 = "annotation: NOF"; break;
		}
		switch (pred) {
		case 0: s2 = "prediction: Eye"; break;
		case 1: s2 = "prediction: Face"; break;
		case 2: s2 = "prediction: NOF"; break;
		}

		Scalar c1, c2;
		c1 = CV_RGB(255, 255, 0);	// yellow
		if (pred == label) c2 = CV_RGB(0, 255, 0);	// green
		else c2 = CV_RGB(255, 0, 0);				// red

		putText(vis, s1, Point(1280, 100), CV_FONT_HERSHEY_PLAIN, 4.0, c1, 3);
		putText(vis, s2, Point(1280, 200), CV_FONT_HERSHEY_PLAIN, 4.0, c2, 3);
		/*imshow("glance", vis);
		waitKey(0);*/
		writer.write(vis);
	}
	cout << corr << ' ' << total << endl;
	cout << (double)corr / total << endl;
	fin.close();
	fout.close();
	writer.release();
}
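The sample above is assembled by copying the PCA projection and the landmark features element by element into one row. As a design note, the same concatenation can be done in one call with cv::hconcat; a minimal sketch (an alternative, not the original code), assuming both inputs are 1-row CV_32F matrices as in test() and the same OpenCV headers/namespaces as the surrounding examples:

// Sketch: build the SVM sample by horizontally concatenating the PCA
// projection and the normalized landmark row (both 1 x N, CV_32F).
Mat_<float> makeSample(const Mat_<float>& pcaVec, const Mat_<float>& ldmks)
{
	Mat_<float> sample;
	hconcat(pcaVec, ldmks, sample);
	return sample;
}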
Code example #25
0
File: main.cpp Project: lzx1413/image_feature
int main(int argc, char* argv[])
{
	string trainlistfile;
	string testlistfile;
	string modelfile;
	string rawfeaturefile;
	string resultpath;
	string kmeansfilepath;
	int maxComponent = 32;
	int cluster_num = 512;
	int feature_dimention = 128;
	if (argc == 1)
	{
		help();
		return -1;
	}
	for (int i = 1; i < argc;++i)
	{
		
		if (string(argv[i])=="--trainlist")
		{
			trainlistfile = argv[++i];
		}
		else if (string(argv[i]) == "--testlist")
		{
			testlistfile = argv[++i];
		}
		else if (string(argv[i]) == "--clusternum")
		{
			cluster_num = std::stoi(argv[++i]);
		}
		else if (string(argv[i]) == "--featuredim")
		{
			feature_dimention = std::stoi(argv[++i]);
		}
		else if (string(argv[i]) == "--modelfile")
		{
			modelfile = argv[++i];
		}
		else if (string(argv[i]) == "--rawfeature")
		{
			rawfeaturefile = argv[++i];
		}
		else if (string(argv[i])=="--resultpath")
		{
			resultpath = argv[++i];

		}
		else if (string(argv[i]) == "--maxComponent")
		{
			maxComponent = std::stoi(argv[++i]);

		}
		else if (string(argv[i]) == "--kmeansfilepath")
		{
			kmeansfilepath = argv[++i];

		}
	}
#ifdef kmeans_pca
	vlad::configure(256,128);
	PCA pca;
	vlad::loadPCAmodel("pca_model.yml",pca);
	cout<<pca.eigenvalues<<endl;
#endif
#ifdef kmeans
	vlad::getKmeansModel(cluster_num,feature_dimention,rawfeaturefile,resultpath);
#endif
#ifdef VLAD
	Stopwatch watch;
	watch.Start();
	vlad::GetVladFeatureFromSift(kmeansfilepath,testlistfile);
	watch.Stop();
	cout<<"getkmeans use "<<watch.GetTime()<<endl;
	getchar();
#endif
#ifdef ReduceMatrix
	ifstream rawfeature(rawfeaturefile.c_str());
	ofstream outPut(resultpath.c_str());
	string single_raw_fea;
	PCA pca;
	vlad::loadPCAmodel(modelfile, pca);
	while (getline(rawfeature,single_raw_fea))
	{
		vector<string> ww;
		cv::Mat rawfeature_mat = cv::Mat(1,LENGTH_OF_SINGLE_DATA, CV_32F);
		split_words(single_raw_fea, " ", ww);
		for (int j = 0; j < LENGTH_OF_SINGLE_DATA; ++j){
			rawfeature_mat.at<float>(0, j) = atof(ww[j].c_str());
		}
		cv::Mat matric_reduced;
		matric_reduced = pca.project(rawfeature_mat);
		for (int i = 0; i < matric_reduced.cols; ++i)
		{
			outPut << matric_reduced.at<float>(0, i) << " "; // separate values so the output stays parseable
		}
		outPut <<endl;
        
	}
	rawfeature.close();
	outPut.close();
#else
#ifdef ALL



	vlad::configure(cluster_num, feature_dimention);
	//vlad::getPCAmodel(trainlistfile, 32);
	vlad::ExitTheSiftFeature(trainlistfile);
	//vlad::GetVladFeature(testlistfile);
//	FV::GetGMMModel(32, 512);
	vector<float> feature{ 1, 1, 1, 1, 1, 1 };
	RootNormFeature(feature);
	Mat a = Mat::ones(4, 6,CV_32FC1);
	for (float a:feature)
	{
		cout << a << endl;
	}
	a.at<float>(0, 0) = 0;
	RootNormFeature(a);
	cout << a;
	getchar();
	/*Mat rawdata = Mat(1, 3, CV_32FC1);
	rawdata.setTo(1);
	cout << rawdata << endl;
	Mat result;
	Mat model = (Mat_<float>(3, 2) << 1, 1, 1, -1, -1, -1);
	encode2Binary(rawdata, model, result);
	cout << "work has been down"<< rawdata << endl << model << endl << result << endl;
	vector<float> rawdata2{ 1.0, 1.0, 1.0 };
	Mat result2;
	encode2Binary(rawdata2, model, result2);
	cout << result2 << endl;
	Mat model2 = (Mat_<uchar>(3, 2) << 1, 1, 1, 1, 1, 1);
	model2.assignTo(model2, CV_32FC1);
	cout << model2<<endl<<model2.type();
	for (int i = 0; i < 10;i++)
	{
		for (int i = 0; i < 10;i++)
		{
			cout << i << endl;
		}
	}*/
	//MethodTimeResume timetest("time.log");
	//timetest.test();
	//getchar();

	//vlad::getPCAmodel(trainlistfile, 32);
#endif
#endif // ReduceMatrix
	
}
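The ReduceMatrix branch only loads an existing PCA model through vlad::loadPCAmodel and projects each raw feature row; the training side is not shown in this example. A minimal sketch of one way such a model could be trained with OpenCV, assuming row-wise CV_32F samples and maxComponent retained components (the function name and the storage keys are illustrative assumptions, not necessarily the format vlad::loadPCAmodel expects; the usual OpenCV headers are assumed):

// Illustrative only: train a PCA on row-wise float features, keep
// maxComponent principal components, and store the result with FileStorage.
// Key names are an assumption, not a documented format of this project.
void trainPCA(const cv::Mat& samples /* one CV_32F feature per row */,
              int maxComponent, const std::string& outfile)
{
	cv::PCA pca(samples, cv::Mat(), CV_PCA_DATA_AS_ROW, maxComponent);
	cv::FileStorage fs(outfile, cv::FileStorage::WRITE);
	fs << "mean" << pca.mean;
	fs << "eigenvalues" << pca.eigenvalues;
	fs << "eigenvectors" << pca.eigenvectors;
	fs.release();
}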
Code example #26
0
int main(int argc, char**argv) {
  GetPot cl(argc, argv);
  
  if(cl.search(2,"-h","--help")) {USAGE(); exit(0);}
  string pcafile=cl.follow("pca.pca",2,"-p","--pca");
  string imagefile=cl.follow("image",2,"-i","--img");

  VectorFeature vec;
  PCA pca; pca.load(pcafile);
  
  uint width, height, depth;
  if(cl.search(2,"-c","--color")) {
    depth=3;
  } else {
    depth=1;
  }
    
  if(!cl.search(2,"-w","--width") && !cl.search(2,"-h","--height")) {
    width=uint(sqrt(double(pca.dim())));
    height=width;
    if(int(height*width)!=pca.dim()) {
      ERR << "pca not for squared images, specify width or height" << endl
          << "height=" << height << "* width=" << width << "!= size=" << pca.dim() << endl;
      exit(20);
    } 
  } else {
    if(cl.search(2,"-w","--width") && !cl.search(2,"-h","--height")) {
      width=cl.follow(10,2,"-w","--width");
      height=pca.dim()/width;
      if(int(height*width)!=pca.dim()) {
        ERR << "pca images of this width, specify valid values" << endl
            << "height=" << height << "* width=" << width << "!= size=" << pca.dim() << endl;
        exit(20);
      }
    } else if(!cl.search(2,"-w","--width") && cl.search(2,"-h","--height")) {
      height=cl.follow(10,2,"-j","--height");
      width=pca.dim()/height;
      if(int(height*width)!=pca.dim()) {
        ERR << "pca images of this height, specify valid values" << endl
            << "height=" << height << "* width=" << width << "!= size=" << pca.dim() << endl;
        exit(20);
      }
    } else {
      height=cl.follow(10,2,"-j","--height");
      width=cl.follow(10,2,"-w","--width");
      if(int(height*width)!=pca.dim()) {
        ERR << "pca images of this height and width, specify valid values" << endl
            << "height=" << height << "* width=" << width << "!= size=" << pca.dim() << endl;
        exit(20);
      }
    }
  }



  vector<double> backtransformed;
  
  if(cl.search(2,"-v","--vec") && !cl.search(2,"-b","--base") && !cl.search(2,"-M","--mean")) {
    string vecfile=cl.follow("vec.vec",2,"-v","--vec");
    DBG(10) << "Loading Vectorfile " << vecfile << endl;
    vec.load(vecfile);    
    DBG(10) << "Vector to be backtransformed" ;
    for(uint i=0;i<vec.size();++i) {
      BLINK(10) << " "<< vec[i];
    }
    BLINK(10) << endl;

    if(cl.search("-n1st"))  vec[0]=0;

    if(cl.search("--nopca")) {
      backtransformed=vec.data();
    } else {
      backtransformed=pca.backTransform(vec.data());
    }
    
    DBG(10) << "Backtransformed Vector" ;
    for(uint i=0;i<backtransformed.size();++i) {
      BLINK(10) << " " <<backtransformed[i];
    }
    BLINK(10) << endl;

  } else if(cl.search(2,"-b","--base") && !cl.search(2,"-v","--vec") && !cl.search(2,"-M","--mean")) {
    uint base=cl.follow(0,2,"-b","--base");
    backtransformed=pca.eigenvector(base);
  } else if(!cl.search(2,"-b","--base") && !cl.search(2,"-v","--vec") && cl.search(2,"-M","--mean")) {
    backtransformed=pca.mean();
  } else {
    USAGE();
    exit(20);
  }
  
  ImageFeature image(width,height,depth);
  
  if(cl.search(2,"-m","--minMean")) {
    DBG(10) << "Subtracting mean" << endl;
    for(uint i=0;i<backtransformed.size();++i) {
      backtransformed[i] -= pca.mean()[i];
    }
  }
  
  for(uint i=0;i<backtransformed.size();++i) {
    image[i]=backtransformed[i];
  }

  
  if(cl.search(2,"-n","--norm")) {
    DBG(10) << "normalization" << endl;
    normalize(image);
  }
  
  
  //make values positive
  //shift(image,-minimum(image,0));

  cutoff(image);

  DBG(10) << "Going to save:" ;
  for(uint i=0;i<image.size();++i) {
    BLINK(10) << " " <<image[i];
  }
  BLINK(10) << endl;



  image.save(imagefile);
  DBG(10) << "cmdline was: "; printCmdline(argc,argv);
}
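The PCA class used by this tool (loaded from a .pca file) is project-specific, but the back-transform at its core is the standard PCA reconstruction: the mean plus a linear combination of eigenvectors weighted by the transformed coefficients. A minimal stand-alone sketch of that computation, assuming eigenvectors stored one per row (hypothetical code, not the project's implementation):

#include <vector>

// Hypothetical sketch of a PCA back-transform: reconstruct a sample from its
// coefficients as mean + sum_k coeffs[k] * eigenvector[k].
std::vector<double> backTransformSketch(const std::vector<double>& coeffs,
                                        const std::vector< std::vector<double> >& eigvecs,
                                        const std::vector<double>& mean)
{
  std::vector<double> x(mean);                  // start from the mean vector
  for (size_t k = 0; k < coeffs.size(); ++k)
    for (size_t d = 0; d < x.size(); ++d)
      x[d] += coeffs[k] * eigvecs[k][d];        // add the weighted eigenvector
  return x;
}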
Code example #27
0
int main(int argc, char**argv) {
  GetPot cl(argc, argv);

  if(cl.search(2,"-h","--help") or !cl.search("-lf")) {USAGE(); exit(0);}

  bool nonorm = cl.search(1, "-nonorm");
  LocalFeatures lf; lf.load(cl.follow("test.lf.gz","-lf"));
  uint sc = cl.follow(1, "-scale");
  uint w=uint(lf.winsize()*2+1);
  ImageFeature img(w,w,lf.zsize());

  string savefilename = cl.follow("", "-save");

  bool backtransform = false;
  PCA pca;
  if(cl.search("-unpca"))
  {
	  if( !pca.load(cl.follow("", "-unpca")) )
	  {
		  ERR << "Error loading PCA file!" << endl;
		  abort();
	  }
	  backtransform = true;
  }

  bool dontshow = cl.search("-dontshow");

  for(uint i=0;i<lf.size();++i) {
    DBG(10) << "lf " << i << endl;
    if(!backtransform) {
    	for(uint j=0;j<lf.dim();++j) {
    		img[j] = lf.getData()[i][j];
    	}
    } else {
    	vector<double> backtransformed = pca.backTransform(lf.getData()[i]);
    	for(uint j=0;j<(uint)pca.dim();++j) {
    		img[j] =  backtransformed[j];
    	}
    }
    if (sc != 1) {
      img = scale(img, w * sc, w * sc);
    }

    if (!nonorm) {
      normalize(img);
    }
    if(!dontshow) {
    	if(cl.search("-layerwise")) {

    		for(uint c=0;c<img.zsize();++c) {
    			BLINK(10) << " " << c;
    			img.display(c,c,c);
    		}
    		BLINK(10) << endl;
    	} else {
    		BLINK(10) << "all layers" << endl;;
    		img.display();
    	}
    }

    if (savefilename != "") {
    	std::ostringstream filenamewithnr;
    	filenamewithnr << savefilename << "_" << setw(3) << setfill('0') << i << ".png";
    	img.save(filenamewithnr.str());
    }
  }
}