void Eigenfaces::compute(const vector<Mat>& src, const vector<int>& labels) {
	if (src.size() == 0) {
		string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
		CV_Error(CV_StsUnsupportedFormat, error_message);
	}
	// observations in row
	Mat data = asRowMatrix(src, CV_64FC1);
	// number of samples
	int n = data.rows;
	// dimensionality of data
	int d = data.cols;
	// assert there are as many samples as labels
	if (n != (int)labels.size()) {
		string error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", n, (int)labels.size());
		CV_Error(CV_StsBadArg, error_message);
	}
	// clip number of components to be valid
	if ((_num_components <= 0) || (_num_components > n))
		_num_components = n;
	// perform the PCA
	PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, _num_components);
	// copy the PCA results
	_mean = pca.mean.reshape(1, 1); // store the mean vector
	_eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row
	_eigenvectors = transpose(pca.eigenvectors); // eigenvectors by column
	_labels = labels; // store labels for prediction
	// save projections
	for (int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
		Mat p = project(data.row(sampleIdx).clone());
		this->_projections.push_back(p);
	}
}
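For reference, the project() call above maps a training sample into the PCA subspace. A minimal sketch of that step, assuming _eigenvectors holds one eigenvector per column (d x k) and _mean is a 1 x d row vector in CV_64FC1 (the helper name is illustrative, not from the original source):

Mat projectSample(const Mat& eigenvectors, const Mat& mean, const Mat& sample) {
	Mat x;
	sample.convertTo(x, eigenvectors.type()); // match the PCA precision
	return (x - mean) * eigenvectors;         // (1 x d) * (d x k) -> 1 x k coefficients
}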
Example #2
void Pca::extract()
{
	Mat data = convVec(m_trainImages);
	int rows = data.rows;

	if (m_components <= 0 || m_components > rows)
		m_components = rows;

	PCA pca(data, Mat(), PCA::DATA_AS_ROW, int(m_components));

	m_data.clear();
	m_data.mean = pca.mean.clone().reshape(1, 1);
	m_data.eigenvalues = pca.eigenvalues.clone();
	transpose(pca.eigenvectors, m_data.eigenvectors);

	for (int i = 0; i < rows; i++) {
		Mat projected = pca.project(data.row(i).clone());
		m_data.train_projections.push_back(projected);
	}

	for (int i = 0; i < m_faces.size(); i++) {
		vector<Mat> projections;
		for (const auto &image : m_testImages[i]) {
			Mat projected = pca.project(convMat(image));
			projections.push_back(projected);
		}
		m_data.test_projections.push_back(projections);
	}

	m_data.labels = m_labels;
}
Example #3
void PCATestCase::decorrelation()
{
  OpenANN::RandomNumberGenerator rng;
  const int N = 100;
  const int D = 2;
  Eigen::MatrixXd X(N, D);
  rng.fillNormalDistribution(X);

  // Linear transformation (correlation)
  Eigen::MatrixXd A(D, D);
  A << 1.0, 0.5, 0.5, 2.0;
  Eigen::MatrixXd Xt = X * A.transpose();
  // Decorrelation (without dimensionality reduction)
  OpenANN::PCA pca(D);
  pca.fit(Xt);
  Eigen::MatrixXd Y = pca.transform(Xt);

  // Covariance matrix should be identity matrix
  Eigen::MatrixXd cov = Y.transpose() * Y;
  ASSERT_EQUALS_DELTA(cov(0, 0), (double) N, 1e-5);
  ASSERT_EQUALS_DELTA(cov(1, 1), (double) N, 1e-5);
  ASSERT_EQUALS_DELTA(cov(0, 1), 0.0, 1e-5);
  ASSERT_EQUALS_DELTA(cov(1, 0), 0.0, 1e-5);

  Eigen::VectorXd evr = pca.explainedVarianceRatio();
  double evrSum = evr.sum();
  ASSERT_EQUALS_DELTA(evrSum, 1.0, 1e-5);
}
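The identity-covariance assertions above hold because the PCA here whitens the data. A minimal sketch of the transform being verified, written against plain Eigen rather than the OpenANN API:

#include <Eigen/Dense>

Eigen::MatrixXd whiten(const Eigen::MatrixXd& X) {
  Eigen::MatrixXd centered = X.rowwise() - X.colwise().mean();
  Eigen::MatrixXd cov = centered.transpose() * centered / double(X.rows());
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(cov);
  // Rotate onto the eigenvectors, then scale each component to unit variance,
  // so Y.transpose() * Y is approximately N times the identity.
  return centered * es.eigenvectors()
       * es.eigenvalues().cwiseSqrt().cwiseInverse().asDiagonal();
}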
Example #4
PCA ProjectorPCA::compressPCA(const Mat& pcaset, int maxComponents, const Mat& testset, Mat& compressed)
{
    PCA pca(pcaset, // pass the data
            Mat(), // we do not have a pre-computed mean vector,
            // so let the PCA engine to compute it
            CV_PCA_DATA_AS_ROW, // indicate that the vectors
            // are stored as matrix rows
            // (use CV_PCA_DATA_AS_COL if the vectors are
            // the matrix columns)
            maxComponents // specify, how many principal components to retain
            );
    // if there is no test data, just return the computed basis, ready-to-use
    if( !testset.data )
        return pca;
    CV_Assert( testset.cols == pcaset.cols );

    compressed.create(testset.rows, maxComponents, testset.type());

    Mat reconstructed;
    for( int i = 0; i < testset.rows; i++ )
    {
        Mat vec = testset.row(i), coeffs = compressed.row(i);
        // compress the vector, the result will be stored
        // in the i-th row of the output matrix
        pca.project(vec, coeffs);
        // and then reconstruct it
        pca.backProject(coeffs, reconstructed);
        // and measure the error
        //printf("");
    }
    return pca;
}
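The error-measurement step inside the loop above is only stubbed out. A minimal sketch of one way to fill it in, as a hypothetical helper (not part of the original class), using the relative L2 reconstruction error:

double reconstructionError(const PCA& pca, const Mat& vec) {
    // Project onto the retained components, reconstruct, and compare to the original.
    Mat coeffs = pca.project(vec);
    Mat reconstructed = pca.backProject(coeffs);
    return norm(vec, reconstructed, NORM_L2) / norm(vec, NORM_L2);
}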
//------------------------------------------------------------------------------
// cv::Eigenfaces
//------------------------------------------------------------------------------
void cv::Eigenfaces::train(InputArray src, InputArray _lbls) {
    if(_lbls.getMat().type() != CV_32SC1) {
        string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
        error(cv::Exception(CV_StsUnsupportedFormat, error_message, "cv::Eigenfaces::train", __FILE__, __LINE__));
    }
    // get labels
    vector<int> labels = _lbls.getMat();
    // observations in row
    Mat data = asRowMatrix(src, CV_64FC1);
    // number of samples
    int n = data.rows;
    // dimensionality of data
    int d = data.cols;
    // assert there are as many samples as labels
    if(n != (int)labels.size()) {
        string error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", n, (int)labels.size());
        error(cv::Exception(CV_StsBadArg, error_message,  "cv::Eigenfaces::train", __FILE__, __LINE__));
    }
    // clip number of components to be valid
    if((_num_components <= 0) || (_num_components > n))
        _num_components = n;
    // perform the PCA
    PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, _num_components);
    // copy the PCA results
    _mean = pca.mean.reshape(1,1); // store the mean vector
    _eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row
    _eigenvectors = transpose(pca.eigenvectors); // eigenvectors by column
    _labels = labels; // store labels for prediction
    // save projections
    for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
        Mat p = subspace::project(_eigenvectors, _mean, data.row(sampleIdx).clone());
        this->_projections.push_back(p);
    }
}
Example #6
// virtual
void GLinearRegressor::trainInner(const GMatrix& features, const GMatrix& labels)
{
	if(!features.relation().areContinuous())
		throw Ex("GLinearRegressor only supports continuous features. Perhaps you should wrap it in a GAutoFilter.");
	if(!labels.relation().areContinuous())
		throw Ex("GLinearRegressor only supports continuous labels. Perhaps you should wrap it in a GAutoFilter.");

	// Use a fast, but not-very-numerically-stable technique to compute an initial approximation for beta and epsilon
	clear();
	GMatrix* pAll = GMatrix::mergeHoriz(&features, &labels);
	Holder<GMatrix> hAll(pAll);
	GPCA pca(features.cols());
	pca.train(*pAll);
	size_t inputs = features.cols();
	size_t outputs = labels.cols();
	GMatrix f(inputs, inputs);
	GMatrix l(inputs, outputs);
	for(size_t i = 0; i < inputs; i++)
	{
		GVec::copy(f[i].data(), pca.basis()->row(i).data(), inputs);
		double sqmag = f[i].squaredMagnitude();
		if(sqmag > 1e-10)
			f[i] *= 1.0 / sqmag;
		l[i].set(pca.basis()->row(i).data() + inputs, outputs);
	}
	m_pBeta = GMatrix::multiply(l, f, true, false);
	m_epsilon.resize(outputs);
	GVecWrapper vw(pca.centroid().data(), m_pBeta->cols());
	m_pBeta->multiply(vw.vec(), m_epsilon, false);
	m_epsilon *= -1.0;
	GVec::add(m_epsilon.data(), pca.centroid().data() + inputs, outputs);

	// Refine the results using gradient descent
	refine(features, labels, 0.06, 20, 0.75);
}
Example #7
void Controller::openFile(QString filename){
    Parameters params;
    qDebug() << "Open parameters window";
    if(params.exec()==true)
    {
        qDebug() << "Starting row:" << params.getStartingRow();
        qDebug() << "Starting column:" << params.getStartingColumn();
        qDebug() << "Ending column:" << params.getEndingColumn();
        qDebug() << "Read file: " << filename.toLocal8Bit().constData();
        this->addFileToRecentlyOpen(filename);
        QFile file(filename);
        DataImport di(params.getStartingRow() - 1, params.getStartingColumn() - 1, params.getEndingColumn() - 1);
        Mat pcaInputData = di.parseData(file);
        bool success = pcaInputData.rows > 0;
        if(!success){
            qDebug() << "Error in: " << filename;
            return;
        }
        qDebug() << "Import status: " << success;
        MicroMatrixPCA pca(pcaInputData);
        Mat pca_pro = pca.projectAll();
        saveMat("pca.txt", pca_pro);
        Mat pca_backpro = pca.backProjectAll(100);
        saveMat("pca_back.txt", pca_backpro);
        QMap<int, float> map=pca.calculateErrors();
        PCAResultWindow *errorWindow = new PCAResultWindow(NULL, map);
        errorWindow->show();
        this->importedDataModel->addImportedFile(filename, pcaInputData.rows, pcaInputData.cols);
    }
}
Example #8
bool makeBOWModel(std::vector<BOWImg> &images, Mat &vocabulary, Mat &trainData, Mat &response)
{
	// Load training images
	std::cout<<"--->Loading training images ... "<<std::endl;
	int numImages = imgRead(images);
	if(numImages < 0)
		return false;
	std::cout<<"    "<<numImages<<" images loaded."<<std::endl;
	
	// Random shuffle samples
	std::random_shuffle (images.begin(), images.end());
	
	/*
		Get the ROI of images. 
		Feature detect and extract descriptors.
	*/
	printf("--->Extracting %s features ...\n", conf.extractor.c_str());	
	features(images, conf.extractor, conf.detector);
	
	BOWKMeansTrainer trainer(conf.numClusters,TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,10,1.0));
	for(std::vector<BOWImg>::iterator iter = images.begin();iter != images.end(); iter++)
	{
		Mat tmp = iter->descriptor;
		trainer.add(tmp);
	}
	
	std::cout<<"--->Constructing vocabulary list ..."<<std::endl;	
	vocabulary = trainer.cluster();
	
	std::cout<<"--->Extracting BOW features ..."<<std::endl;
 	bowFeatures(images, vocabulary, conf.extractor);
 	
	// Prepare training data.
	Mat rawData;

	for(std::vector<BOWImg>::iterator iter = images.begin();iter != images.end(); iter++)
	{
		rawData.push_back(iter->BOWDescriptor);
		response.push_back(iter->label);
	}
	
	// PCA
#ifdef _USE_PCA_
	float factor = 1.0f;
	int maxComponentsNum = static_cast<int>(conf.numClusters * factor);
	PCA pca(rawData, Mat(),CV_PCA_DATA_AS_ROW, maxComponentsNum);
	Mat pcaData;
	for(int i = 0;i<rawData.rows;i++)
	{
		Mat vec = rawData.row(i);
		Mat coeffs = pca.project(vec);
		pcaData.push_back(coeffs);
	}	
	trainData = pcaData;
#else
	trainData = rawData;
#endif	
	return true;
}
Example #9
void Utilities::upload_2d(const PixelFormatDescriptor& pfd, GLsizei w, GLsizei h, const void *data)
{
    GLenum internalFormat_gl, format_gl, type_gl;
    toOpenGL(pfd, internalFormat_gl, format_gl, type_gl);
    PushClientAttrib pca(GL_CLIENT_PIXEL_STORE_BIT);

    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    GL_DEBUG(glTexImage2D)(GL_TEXTURE_2D, 0, internalFormat_gl, w, h, 0, format_gl, type_gl, data);
}
Example #10
void app_start(int, char**) {
	PCA9685 pca(i2c);
	pca.set_frequency(500);
	pca.set_pwm(0, 0, 4095);
	wait_ms(2000);
	pca.set_pwm(0, 0, 1000);
	wait_ms(2000);
	wait_ms(2000);
	pca.set_pwm(0, 0, 4095);
}
Example #11
  // 
  // PCA
  //
  void calcPCA (
      const cv::Mat &src, 
      cv::Mat &result,
      const int compress_dim)
  {
//    const int DIM=200; // 200 dimensions
//    const int SAMPLES=1000; // 1000 samples
    
    const int dim_ori = src.cols;
    const int num_vectors = src.rows;

//    const int RDIM = 3; // 3 dimensions after compression
//    cv::Mat src(SAMPLES,DIM,CV_32FC1);
    result = cv::Mat (num_vectors, compress_dim,CV_32FC1); // the smaller of DIM and SAMPLES
    double val;

//    std::ifstream ifs("data.txt");
//
//    for(int j=0;j<SAMPLES;j++){
//      for(int i=0;i<DIM;i++){
//        ifs >> val;
//        ((float*)src.data)[j*src.cols+i]=val;
//      }
//    }

    // guarantee the data is laid out one sample per row
    cv::PCA pca (src, cv::Mat(), CV_PCA_DATA_AS_ROW, 0);

    result = pca.project (src);

//    for(int j = 0;j<dim_ori;j++){
//      for(int i=0;i<compress_dim;i++){ // print only the top RDIM dimensions
//        std::cout << std::dec << ((float*)result.data)[j*result.cols+i] << ",";
//      }
//      std::cout << std::endl;
//    }

    cv::Mat evalues=pca.eigenvalues;
    float sum=0.0f;
    for(int i=0;i<pca.eigenvalues.rows;i++){
      sum+=((float*)evalues.data)[i];
    }

    float contribution=0.0f;
    for(int i=0;i<compress_dim;i++){// compute the cumulative contribution ratio of the top RDIM dimensions
      contribution+=((float*)evalues.data)[i] / sum;
      std::cout << "cumulative contribution ratio of top " << i+1 << " dimensions: " << contribution << std::endl;
    }


  }
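The contribution loop above can also be written with cv::sum. A minimal sketch as a hypothetical helper, assuming the eigenvalues are stored as a CV_32F column (as cv::PCA produces for CV_32FC1 input):

float cumulativeContribution(const cv::PCA& pca, int compress_dim) {
  float total = static_cast<float>(cv::sum(pca.eigenvalues)[0]);
  float acc = 0.0f;
  for (int i = 0; i < compress_dim && i < pca.eigenvalues.rows; ++i)
    acc += pca.eigenvalues.at<float>(i) / total;
  return acc; // cumulative contribution ratio of the top compress_dim components
}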
Example #12
bool cluster_aa(const QString& clips_path, const QString& detect_path, const QString& firings_out_path, const cluster_aa_opts& opts)
{
    Mda32 clips(clips_path);

    int M = clips.N1();
    int T = clips.N2();
    int L = clips.N3();

    Mda32 FF;
    {
        // do this inside a code block so memory gets released
        Mda32 clips_reshaped(M * T, L);
        int iii = 0;
        for (int ii = 0; ii < L; ii++) {
            for (int t = 0; t < T; t++) {
                for (int m = 0; m < M; m++) {
                    clips_reshaped.set(clips.value(m, t, ii), iii);
                    iii++;
                }
            }
        }

        Mda32 CC, sigma;
        pca(CC, FF, sigma, clips_reshaped, opts.num_features, false); //should we subtract the mean?
    }

    isosplit5_opts i5_opts;
    i5_opts.isocut_threshold = opts.isocut_threshold;
    i5_opts.K_init = opts.K_init;

    QVector<int> labels(L);
    isosplit5(labels.data(), opts.num_features, L, FF.dataPtr(), i5_opts);

    Mda detect(detect_path);
    int R = detect.N1();
    if (R < 3)
        R = 3;
    Mda firings(R, L);
    for (int i = 0; i < L; i++) {
        for (int r = 0; r < R; r++) {
            firings.setValue(detect.value(r, i), r, i); //important to use .value() here because otherwise it will be out of range
        }
        firings.setValue(labels[i], 2, i);
    }

    return firings.write64(firings_out_path);
}
Example #13
int main(int argc,char* argv[]) {
	if (argc < 5) {
		printf("%s ratio target id_start id_end\n",argv[0]);
		return 1;
	}

	float ratio = atof(argv[1]);
	int id_start = atoi(argv[3]);
	int id_end = atoi(argv[4]);

	Desc targetDesc = parseKeyFile(argv[2]);
	printf("Loaded %d (dim %d) keypoints from %s\n",targetDesc.n,targetDesc.dimension,argv[2]);
	
	int newDimension = 64;
	float* pca_basis = new float[newDimension * targetDesc.dimension];
	pca(targetDesc,newDimension,pca_basis);
	changeBasis(&targetDesc,newDimension,pca_basis);

	char buffer[128];
	struct timespec tic,toc;
	clock_gettime(CLOCK_MONOTONIC,&tic);
	for (int i=id_start;i<=id_end;i++) {
		sprintf(buffer,"%d.key",i);
		Desc sourceDesc = parseKeyFile(buffer);
		changeBasis(&sourceDesc,newDimension,pca_basis);
		std::vector<Match> match = findMatch(sourceDesc,targetDesc,ratio);
		sprintf(buffer,"%d.site.match",i);
		FILE* output = fopen(buffer,"w");
		for (size_t i=0;i<match.size();i++) {
			fprintf(output,"0 %f %f 1 %f %f\n",
				sourceDesc.x[match[i].id1],sourceDesc.y[match[i].id1],
				targetDesc.x[match[i].id2],targetDesc.y[match[i].id2]);
		}
		printf("Wrote %lu matches to %s\n",match.size(),buffer);
		fclose(output);

		delete[] sourceDesc.x;
		delete[] sourceDesc.y;
		delete[] sourceDesc.data;
	}
	clock_gettime(CLOCK_MONOTONIC,&toc);
	printf("Profiling: %fs for %d images\n",toc.tv_sec - tic.tv_sec + 0.000000001 * toc.tv_nsec - 0.000000001 * tic.tv_nsec,id_end-id_start+1);
	delete[] targetDesc.x;
	delete[] targetDesc.y;
	delete[] targetDesc.data;
	delete[] pca_basis;
}
void CharFeatureCollection::reduceMatrixDimensionsNewDim(const int newDim)
{
	PCA<float> pca(mDataMatrix);
	pca.computePCA();
	// reduce dimension:
//	const int index = pca.dimReductionIndex(thresh);
//	std::cout << "thresh = " << thresh << ", index = " << index << std::endl;
	ublas::matrix<float> reducedDataMatrix;
	pca.project(newDim, reducedDataMatrix);
	std::cout << "dimension before reduction: "
		<< mDataMatrix.size2() << std::endl;
	mDataMatrix = reducedDataMatrix;
	std::cout << "dimension after reduction: "
		<< mDataMatrix.size2() << std::endl;

	return;
} // end reduceMatrixDimensions(...)
Example #15
void SFMViewer::update(std::vector<cv::Point3d> pcld,
		std::vector<cv::Vec3b> pcldrgb,
		std::vector<cv::Point3d> pcld_alternate,
		std::vector<cv::Vec3b> pcldrgb_alternate,
		std::vector<cv::Matx34d> cameras) {
	m_pcld = pcld;
	m_pcldrgb = pcldrgb;
	m_cameras = cameras;

	//get the scale of the result cloud using PCA
	{
		cv::Mat_<double> cldm(pcld.size(), 3);
		for (unsigned int i = 0; i < pcld.size(); i++) {
			cldm.row(i)(0) = pcld[i].x;
			cldm.row(i)(1) = pcld[i].y;
			cldm.row(i)(2) = pcld[i].z;
		}
		cv::Mat_<double> mean; //cv::reduce(cldm,mean,0,CV_REDUCE_AVG);
		cv::PCA pca(cldm, mean, CV_PCA_DATA_AS_ROW);
		scale_cameras_down = 1.0 / (3.0 * sqrt(pca.eigenvalues.at<double> (0)));
//		std::cout << "emean " << mean << std::endl;
//		m_global_transform = Eigen::Translation<double,3>(-Eigen::Map<Eigen::Vector3d>(mean[0]));
	}

	//compute transformation to place cameras in world
	m_cameras_transforms.resize(m_cameras.size());
	Eigen::Vector3d c_sum(0,0,0);
	for (int i = 0; i < m_cameras.size(); ++i) {
		Eigen::Matrix<double, 3, 4> P = Eigen::Map<Eigen::Matrix<double, 3, 4,
				Eigen::RowMajor> >(m_cameras[i].val);
		Eigen::Matrix3d R = P.block(0, 0, 3, 3);
		Eigen::Vector3d t = P.block(0, 3, 3, 1);
		Eigen::Vector3d c = -R.transpose() * t;
		c_sum += c;
		m_cameras_transforms[i] =
				Eigen::Translation<double, 3>(c) *
				Eigen::Quaterniond(R) *
				Eigen::UniformScaling<double>(scale_cameras_down)
				;
	}

	m_global_transform = Eigen::Translation<double,3>(-c_sum / (double)(m_cameras.size()));
//	m_global_transform = m_cameras_transforms[0].inverse();

	updateGL();
}
void PCAKNN::train(const list<Interval> &intervals, const bool console_output) {
    if (intervals.size() == 0) return;
    projections.clear(); n = 0;

    width = intervals.front().start->face.cols;
    height = intervals.front().start->face.rows;

    list<Interval>::const_iterator itr;
    vector<Face>::const_iterator start, end;
    //add the number of images from all intervals
    for (itr = intervals.cbegin(); itr != intervals.cend(); itr++) {
        n += itr->Length();
    }

    Mat pca_matrix(static_cast<int>(n), width*height, data_type);
    int c = 0;
    for (itr = intervals.cbegin(); itr != intervals.cend(); itr++) {
        //for each image in the current interval
        for (start = itr->start, end = itr->end; start != end; start++, ++c) {
            if (console_output) printf("Preparing samples %d/%d\n", c + 1, n);
            //insert current image into pca_matrix
            Mat image_row = start->face.clone().reshape(1, 1);
            Mat row_i = pca_matrix.row(c);
            image_row.convertTo(row_i, data_type);//CV_64FC1 ?
            Face f; f.name = start->name;
            projections.push_back(f);//save the names for later
        }
    }

    if (console_output) printf("TRAINING...\n");
    //Perform principal component analysis on pca_matrix
    PCA pca(pca_matrix, Mat(), CV_PCA_DATA_AS_ROW, pca_matrix.rows);

    //extract mean/eigenvalues
    mean = pca.mean.reshape(1, 1);
    ev = pca.eigenvalues.clone();
    transpose(pca.eigenvectors, w);

    //project each face into subspace and save them with the name above for recognition
    for (unsigned int i = 0; i<n; ++i) {
        if (console_output) printf("Projecting %d/%d\n", i + 1, n);//project so subspace
        projections[i].face = subspaceProject(w, mean, pca_matrix.row(i));
    }
}
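train() only stores the labelled projections. A hedged sketch, not taken from the original source, of the matching step a class like this typically pairs with it: project the query with the same basis and return the name of the nearest stored projection.

#include <limits>

std::string recognizeNearest(const Mat& w, const Mat& mean,
                             const std::vector<Face>& projections, const Mat& query_row) {
    Mat q = subspaceProject(w, mean, query_row);  // same projection as in train()
    double best = std::numeric_limits<double>::max();
    std::string best_name;
    for (const Face& f : projections) {
        double d = norm(f.face, q, NORM_L2);      // Euclidean distance in the subspace
        if (d < best) { best = d; best_name = f.name; }
    }
    return best_name;
}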
Example #17
void Utilities::upload_2d_mipmap(const PixelFormatDescriptor& pfd, GLsizei w, GLsizei h, const void *data)
{
    GLenum internalFormat_gl, format_gl, type_gl;
    toOpenGL(pfd, internalFormat_gl, format_gl, type_gl);
    PushClientAttrib pca(GL_CLIENT_PIXEL_STORE_BIT);

    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    GL_DEBUG(glTexImage2D)(GL_TEXTURE_2D, 0, internalFormat_gl, w, h, 0, format_gl, type_gl, data);
    
    if (w == 1 && h == 1) return;
    
    uint32_t alphaMask = pfd.getAlphaMask(),
        redMask = pfd.getRedMask(),
        greenMask = pfd.getGreenMask(),
        blueMask = pfd.getBlueMask();
    int bpp = pfd.getColourDepth().getDepth();
    
    SDL_Surface *surf = SDL_CreateRGBSurfaceFrom((void *)data, w, h, bpp, w * bpp / 8, redMask, greenMask, blueMask, alphaMask);
    SDL_assert(surf != nullptr);
    
    GLsizei newW = w;
    GLsizei newH = h;
    GLint level = 0;
    
    do {
        if (newW > 1) newW /= 2;
        if (newH > 1) newH /= 2;
        level++;
        
        SDL_Surface *newSurf = SDL_CreateRGBSurface(0, newW, newH, bpp, redMask, greenMask, blueMask, alphaMask);
        SDL_assert(newSurf != nullptr);
        
        /// @todo this is 'low-quality' and not thread-safe
        SDL_SoftStretch(surf, nullptr, newSurf, nullptr);
        
        GL_DEBUG(glTexImage2D)(GL_TEXTURE_2D, level, internalFormat_gl, newW, newH, 0, format_gl, type_gl, newSurf->pixels);
        SDL_FreeSurface(newSurf);
    } while (!(newW == 1 && newH == 1));
    
    SDL_FreeSurface(surf);
}
Example #18
void PCATestCase::dimensionalityReduction()
{
  OpenANN::RandomNumberGenerator rng;
  const int N = 100;
  const int D = 5;
  Eigen::MatrixXd X(N, D);
  rng.fillNormalDistribution(X);

  // Strong correlation
  Eigen::MatrixXd A = Eigen::MatrixXd::Identity(D, D) * 0.5 +
      Eigen::MatrixXd::Ones(D, D);
  Eigen::MatrixXd Xt = X * A.transpose();
  // Dimensionality reduction
  OpenANN::PCA pca(1);
  pca.fit(Xt);
  Eigen::MatrixXd Y = pca.transform(Xt);
  ASSERT_EQUALS(Y.rows(), N);
  ASSERT_EQUALS(Y.cols(), 1);
  ASSERT_EQUALS(pca.explainedVarianceRatio().rows(), 1);
  ASSERT(pca.explainedVarianceRatio().sum() > 0.9);
}
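For reference, the quantity asserted on above: the explained-variance ratio is each covariance eigenvalue divided by the sum of all eigenvalues, largest first. A minimal sketch in plain Eigen, independent of the OpenANN API:

#include <Eigen/Dense>

Eigen::VectorXd explainedVarianceRatio(const Eigen::MatrixXd& X) {
  Eigen::MatrixXd centered = X.rowwise() - X.colwise().mean();
  Eigen::MatrixXd cov = centered.transpose() * centered / double(X.rows() - 1);
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(cov);  // eigenvalues in ascending order
  Eigen::VectorXd ev = es.eigenvalues().reverse();         // largest first
  return ev / ev.sum();                                    // entries sum to 1
}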
Example #19
int main(int argc, char *argv[]) {
	unsigned int n, d;
	double /* r, */phi/*, theta, sp, cp, st, ct*/, sample[DIM][NUM_SAMPLE];
	srand(time(0));
	/* generate sample points (with some pattern hidden among random data) */ 
	for (n = 0; n < NUM_SAMPLE; ++n) {
		/* spherical correlation */	
		/*
		phi = 2 * M_PI * rand() / RAND_MAX;
		theta = M_PI * rand() / RAND_MAX;
		sp = sin(phi);
		cp = cos(phi);
		st = sin(theta);
		ct = cos(theta);
		sample[0][n] = RADIUS * st * cp;
		sample[1][n] = RADIUS * st * sp;
		sample[2][n] = RADIUS * ct;
		*/
		
		/* conic, truncated by 1 half-plane */
		phi = M_PI * rand() / RAND_MAX;
		/* theta = M_PI * rand() / RAND_MAX; */
		sample[2][n] = RADIUS * rand() / RAND_MAX;
		sample[3][n] = 1000 * RADIUS * rand() / RAND_MAX;
		sample[0][n] = sample[2][n] * cos(phi);
		sample[1][n] = sample[2][n] * sin(phi);
		/*
		sample[4][n] = sample[3][n] * cos(theta);
		sample[5][n] = sample[3][n] * sin(theta);
		*/
		sample[4][n] = sample[3][n] * cos(phi);
		sample[5][n] = 1000 * sample[3][n] * sin(phi);

		/* compare with all-random data points: */
		/* for (d = 0; d < DIM; ++d) sample[n][d] = RADIUS * rand() / RAND_MAX; */
	}
	for (d = 6; d < DIM; ++d) for (n = 0; n < NUM_SAMPLE; ++n) sample[d][n] = d * (2 * RADIUS * rand() / RAND_MAX - RADIUS);  /* the other coordinates are all non-negative */
	pca(sample);
	return 0;
}
Example #20
int main(int argc, const char *argv[])
{
	int trainSize = 10;
	int testSize = 2;

	vector<vector<double>> euclids(crossvalidations, vector<double>(maxComponents, 0.0));
	vector<vector<double>> svms(crossvalidations, vector<double>(maxComponents, 0.0));
	vector<vector<double>> mahals(crossvalidations, vector<double>(maxComponents, 0.0));

	Pca pca(trainSize, testSize);
	const Pca::Data *pca_data = pca.getData();

	Euclid euclid(pca_data);
	Svm svm(pca_data);
	Mahal mahalanobis(pca_data);

	for (int i = 0; i < crossvalidations; i++) {
		for (int j = 1; j <= maxComponents; j++) {
			pca.setComponents(j);

			euclids[i][j - 1] = euclid.classify();
			svms[i][j - 1] = svm.classify();
			mahals[i][j - 1] = mahalanobis.classify();
		}
		pca.init();
	}

	vector<double> meanEuclid = mean(euclids);
	vector<double> meanSvm = mean(svms);
	vector<double> meanMahal = mean(mahals);

//	cout << "Euclid,Svm,Mahal" << endl;
	for (int i = 0; i< maxComponents; i++) {
		cout << meanEuclid[i] << "," << meanSvm[i] << "," << meanMahal[i] << endl;
	}

	return 0;
}
Example #21
void NormalEstimator::fitNormal(){
	//
	//compute surface normal
	int ptCnt = m_pt.size();
	if (ptCnt <= 0)
	{
		std::cout << "no point set provided" << std::endl;
		return;
	}
	ANNpointArray ptArray = annAllocPts(ptCnt, 3);
	//assign point values
	for (int i=0; i<ptCnt; i++) {
		cv::Vec3f pt = m_pt[i];
		ANNpoint ptPtr = ptArray[i];
		ptPtr[0] = pt[0];
		ptPtr[1] = pt[1];
		ptPtr[2] = pt[2];
	}

	///
	ANNkd_tree kdt(ptArray, ptCnt, 3);
	
	ANNpoint queryPt = annAllocPt(3);
	int nnCnt = 100;

	ANNidxArray nnIdx = new ANNidx[nnCnt];
	ANNdistArray nnDist = new ANNdist[nnCnt];

	float sigma = -1;
	float evalRatio = 0.05;
	//estimate sigma
	for (int i=0; i < ptCnt; i++) {
		cv::Vec3f pt = m_pt[i];
		queryPt[0] = pt[0];
		queryPt[1] = pt[1];
		queryPt[2] = pt[2];

		//kdt.annkSearch(queryPt,nnCnt, nnIdx, nnDist);
		kdt.annkSearch(queryPt,50, nnIdx, nnDist);
		if (nnDist[49] < sigma ||sigma == -1 )
		{
			sigma = nnDist[49];
		}
	}
	sigma = 0.001;
	std::cout << "search radius:" << sigma << std::endl;
	std::cout << "estimating normals for point set by PCA, be patient..." << std::endl;
	for (int i=0; i < ptCnt; i++) {
		cv::Vec3f pt = m_pt[i];
		queryPt[0] = pt[0];
		queryPt[1] = pt[1];
		queryPt[2] = pt[2];

		//kdt.annkSearch(queryPt,nnCnt, nnIdx, nnDist);
		kdt.annkFRSearch(queryPt, sigma, nnCnt, nnIdx, nnDist);
		int validCnt = 0;
		for (int j = 0; j < nnCnt; j++)
		{
			if (nnIdx[j] == ANN_NULL_IDX)
			{
				break;
			}
			validCnt++;
		}
		//std::cout << validCnt << std::endl;
		if (validCnt < 3)
		{
			continue;
		}
		
		cv::Mat pcaVec(validCnt,3,CV_64FC1);
		cv::Mat pcaMean(1,3,CV_64FC1);
		for (int j = 0; j < validCnt; j++)
		{
			pcaVec.at<double>(j,0) = m_pt[nnIdx[j]][0];
			pcaVec.at<double>(j,1) = m_pt[nnIdx[j]][1];
			pcaVec.at<double>(j,2) = m_pt[nnIdx[j]][2];
		}
		cv::PCA pca(pcaVec,cv::Mat(),CV_PCA_DATA_AS_ROW);

		if (pca.eigenvalues.at<double>(2,0) / pca.eigenvalues.at<double>(1,0) > evalRatio)
		{
			continue;
		}

		m_ptNorm[i] = cv::Vec3f(pca.eigenvectors.at<double>(2,0),pca.eigenvectors.at<double>(2,1),pca.eigenvectors.at<double>(2,2));
		float nr = cv::norm(m_ptNorm[i]);
		m_ptNorm[i][0] /= nr;
		m_ptNorm[i][1] /= nr;
		m_ptNorm[i][2] /= nr;
		//std::cout << m_ptNorm[i][0] << " " << m_ptNorm[i][1] << " " << m_ptNorm[i][2] << std::endl;
		m_ptNormFlag[i] = true;

	}

	//
	std::cout << "done..." << std::endl;
//////////////////////////////////////////////////////////////////////////

	//std::cout << "correct normal direction..." << std::endl;
	//sigma *= 1; //
	//nnCnt *= 1; //
	////reallocate the space for nn idx and nn dist array
	//delete [] nnDist;
	//delete [] nnIdx;
	//nnIdx = new ANNidx[nnCnt];
	//nnDist = new ANNdist[nnCnt];

	//int invertCnt = 0;
	//for (int i = 0; i < ptCnt; i++)
	//{
	//	if (!m_ptNormFlag[i])
	//	{
	//		continue;
	//	}
	//	
	//	cv::Vec3f pt = m_pt[i];
	//	queryPt[0] = pt[0];
	//	queryPt[1] = pt[1];
	//	queryPt[2] = pt[2];

	//	kdt.annkFRSearch(queryPt, sigma, nnCnt, nnIdx, nnDist);
	//	int validCnt = 0, normConsCnt = 0, distConsCnt = 0;
	//	for (int j = 0; j < nnCnt; j++)
	//	{
	//		if (nnIdx[j] == ANN_NULL_IDX)
	//		{
	//			break;
	//		}
	//		else{
	//			//check the direction
	//			cv::Vec3f v1 = m_ptNorm[i];
	//			cv::Vec3f v2 = m_ptNorm[nnIdx[j]];
	//			
	//			if (!m_ptNormFlag[nnIdx[j]])
	//			{
	//				continue;
	//			}else{
	//				//
	//				validCnt++;
	//				if( v2.ddot(v1) > 0 )
	//					normConsCnt++;
	//			}
	//		}
	//	}
	//	//inconsistency detected, invert the direction
	//	if (normConsCnt / validCnt < 0.9)
	//	{
	//		//std::cout << "invert" << std::endl;
	//		invertCnt++;
	//		m_ptNorm[i] = cv::Vec3f(0,0,0) - m_ptNorm[i];
	//	}
	//}
	//std::cout << "# of inverted vertex:" << invertCnt << std::endl;
	////////////////////////////////////////////////////////////////////////////
	
	annDeallocPt(queryPt);
	annDeallocPts(ptArray);
	delete [] nnDist;
	delete [] nnIdx;

}
Example #22
BOOL TabPage_6_OnCommand(HWND hDlg,WPARAM wParam,LPARAM lParm)
{
	HWND hEdit;

	hEdit = GetDlgItem(hDlg,EditControlID[0]);

	switch(LOWORD(wParam))
	{
		//Start calculation (no text input)
	case PCA_START:
		{
			Exception excption;
			ErrNo err;

			PCA pca(hDlg);
			if(err = pca.GetStart_Calc())
			{
				excption.ErrorReport(hDlg,err);
				return FALSE;
			}
		}
		return TRUE;

		// Clear sample data
	case PCA_DATA_CLEAR:
		Edit_SetText(hEdit,TEXT(""));
		break;

	case PCA_DRAW:
		{
			PCTSTR pErrInfo;

			int n_samples,n_index;
			double **data = NULL;

			TCHAR seps[] = TEXT(" ,\t");
			TCHAR szBuffer[MAXSIZE];
			int graph_type,*sample_num,ncount = 0,nLength;
			Exception excption;
			ErrNo err;

			//Get the number of samples
			if(err = GetLine_Int(GetDlgItem(hDlg,EditControlID[2]),n_samples))
			{
				excption.ErrorReport(hDlg,err);
				return err;
			}
			//Get the number of variables
			if(err = GetLine_Int(GetDlgItem(hDlg,EditControlID[3]),n_index))
			{
				excption.ErrorReport(hDlg,err);
				return err;
			}
			//Get the plot type
			graph_type = ComboBox_GetCurSel(GetDlgItem(hDlg,ComboBoxID));

			

			//Get the sample data
			//Allocate memory
			if(!(data = new double* [n_samples]))
			{
				excption.ErrorReport(hDlg,ALLOCATE_MEMORY_FAIL);
				return ALLOCATE_MEMORY_FAIL;
			}
			for(int i=0;i<n_samples;i++)
			{
				if(!(data[i] = new double [n_index]))
				{
					excption.ErrorReport(hDlg,ALLOCATE_MEMORY_FAIL);
					return ALLOCATE_MEMORY_FAIL;
				}
			}
			if(err = GetMatrix(GetDlgItem(hDlg,EditControlID[0]),data,n_samples,n_index))
			{
				excption.ErrorReport(hDlg,err);
				return err;
			}

			//Decide whether to plot all the samples on the graph
			if(Button_GetCheck(GetDlgItem(hDlg,CheckBoxID)) == BST_CHECKED)		//plot all samples
			{
				if(err = PCA_DrawGraph(hDlg,data,List_Names,n_samples,n_index,graph_type))
				{
					excption.ErrorReport(hDlg,err);
					return err;
				}
				
			}
			else			//plot only a subset
			{
				//Get the indices of the samples to plot
				Edit_GetText(GetDlgItem(hDlg,EditControlID[1]),szBuffer,MAXSIZE);

				ncount = 0;
				//Count how many sample indices were entered
				nLength = _tcslen(szBuffer);
				for(int i=0;i<nLength;i++)
				{
					if(szBuffer[i] == TEXT(','))
						ncount++;
				}
				ncount++;

				//Allocate space to store the indices of the samples to plot
				if(!(sample_num = new int [ncount]))
				{
					excption.ErrorReport(hDlg,ALLOCATE_MEMORY_FAIL);
					return ALLOCATE_MEMORY_FAIL;
				}
				if(err = StringToNumbericArr(szBuffer,seps,sample_num,ncount))
				{
					excption.ErrorReport(hDlg,err);
					return err;
				}
				//Check the sample indices for errors
				for(int i=0;i<ncount;i++)
				{
					if(sample_num[i] > n_samples)
					{
						pErrInfo = excption.FormatError(INCORRECT_DATA);
						_sntprintf_s(szBuffer,_countof(szBuffer),MAXSIZE,TEXT("Error ID:%d\r\nError message:%s\r\nDetails:%s"),INCORRECT_DATA,pErrInfo,TEXT("Sample index exceeds the number of samples"));
						MessageBox(hDlg,szBuffer,TEXT("Error"),MB_ICONERROR);
						return INCORRECT_DATA;
					}
				}

				QuickSort(sample_num,0,ncount-1);		//ascending order

				if(err = PCA_DrawGraph(hDlg,data,List_Names,n_samples,n_index,graph_type,false,sample_num,ncount))
				{
					excption.ErrorReport(hDlg,err);
					return err;
				}

			}
		}
		return TRUE;

	case PCA_NAME:
		{
			HINSTANCE hInstance;
			//Create a modal dialog
			hInstance = GetModuleHandle(NULL);
			DialogBox(hInstance,MAKEINTRESOURCE(IDD_PCA_NAME),hDlg,PCA_AddNames_Proc);
		}
		return TRUE;

	case ALL_CHECK:			//plot all of the selected sample data on the graph
		{
			if(Button_GetCheck(GetDlgItem(hDlg,CheckBoxID)) == BST_CHECKED)
				Edit_SetReadOnly(GetDlgItem(hDlg,EditControlID[1]),TRUE);
			else
				Edit_SetReadOnly(GetDlgItem(hDlg,EditControlID[1]),FALSE);
		}
		return TRUE;

	case RC_CLEAR:
		Edit_SetText(GetDlgItem(hDlg,OutPut_ResultID),TEXT(""));
		return TRUE;

	case RC_HELP:
		{

		}
		return TRUE;
	}
	return FALSE;
}
Example #23
void HullToObjectModule::completeBlobs(std::deque<SpRMMMobileObject> &objects,
                                               QImage *fg, QImage *current) {

    if(objects.size() == 0)
        return;
    int nbins = 256/m_bins;
    int i, j, w = fg->width(), h = fg->height(), k=0;
    int i0, j0, w0, h0;
    double maxVal;

    QImage curr888 = current->convertToFormat(QImage::Format_RGB888);
    cv::Mat c(h, w, CV_8UC3), c_yuv(h, w, CV_8UC3),
            f(h, w, CV_8UC1), f0(h, w, CV_8UC1), r(h, w, CV_8UC3);

    int bl = fg->bytesPerLine(), bl2 = curr888.bytesPerLine();
    std::deque<SpRMMMobileObject>::iterator it, it_end = objects.end();
    uchar d1, d2, d3,
          *fg_p = fg->bits(),
          *c_p = curr888.bits();

    memcpy(c.data, c_p, h*bl2);
    memcpy(f.data, fg_p, h*bl);
    memset(c_yuv.data, 0, h*bl2);
    memset(r.data, 0, h*bl2);

    f.copyTo(f0);

    cv::Rect roi;
    //Histogram parameters
    int channels[] = {1, 2};
    int histSize[] = {nbins, nbins};
    float pranges[] = { 0, 256 };
    const float* ranges[] = { pranges, pranges };

    //Rectangular structuring element
    cv::Mat element = cv::getStructuringElement( cv::MORPH_RECT,
                                           cv::Size( 3, 3 ),
                                           cv::Point( 1, 1 ) );

    //Set local window pixel histogram for comparison
    cv::Rect wroi;
    wroi.width = wroi.height = w_size/2;

    //Start blobs processing for hulls calculation
    for(it=objects.begin(); it!=it_end; it++) {
        k++;

        SpRMMMobileObject obj = (*it);
        SpHullModel newHull(new HullModel());
        Blob &b = obj->multiModel.binterface;
        i0 = b.bbox.ytop, j0 = b.bbox.xleft,
        h0 = b.bbox.height, w0 = b.bbox.width;
        if(j0 >= f.cols || i0 >= f.rows || h0 <= 0 || w0 <= 0) {
            m_data->hulls.push_back(newHull);
            continue;
        }        

        if(j0 < 0) {
            w0 += j0;
            j0 = 0;
            if(w0 <= 0) w0 = 1;
        }

        if(i0 < 0) {
            h0 += i0;
            i0 = 0;
            if(h0 <= 0) h0 = 1;
        }

        if(j0 + w0 > f.cols)
            w0 = f.cols - j0;

        if(i0 + h0 > f.rows)
            h0 = f.rows - i0;

        //Using the same Mat f .....

        //Opening on the blob
        roi.x = j0;
        roi.y = i0;
        roi.width = w0;
        roi.height = h0;


/*        if(m_data->frameNumber == 962) {
            std::cout << "Error frame " << m_data->frameNumber << std::endl;
            std::cout << "Mobile id " << obj->mobile_id << std::endl;
            std::cout << "\troi.x: " << roi.x;
            std::cout << "\troi.y: " << roi.y;
            std::cout << ";\troi.width: " << roi.width;
            std::cout << ";\troi.height: " << roi.height;
            std::cout << ";\tf.cols: " << f.cols << std::endl;
        } */


        //Restrict operations to blob zone

        if(!(0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= f.cols)) {
            std::cout << "Error frame " << m_data->frameNumber << std::endl;
            std::cout << "\troi.x: " << roi.x;
            std::cout << ";\troi.width: " << roi.width;
            std::cout << ";\tf.cols: " << f.cols << std::endl;
        }

        if(!(0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= f.rows)) {
            std::cout << "Error frame " << m_data->frameNumber << std::endl;
            std::cout << "\troi.x: " << roi.y;
            std::cout << ";\troi.width: " << roi.height;
            std::cout << ";\tf.cols: " << f.rows << std::endl;
        }

        cv::Mat aux(f, roi);
        cv::Mat aux0(f0, roi);

        //Reduce bad detections, in general near borders
        cv::erode(aux, aux, element, cv::Point(-1,-1), 1);

        //Reduce bad detections, in general near borders and recover shape
        cv::erode(aux0, aux0, element, cv::Point(-1,-1), 1);
        cv::dilate(aux0, aux0, element, cv::Point(-1,-1), 1);

        //Border detection
        cv::Mat border_aux(aux.size(), CV_8UC1);
        cv::Canny(aux,border_aux, 50,100, 3);

#ifdef RSEG_DEBUG
//        cv::namedWindow( "Canny", 1 );
//        cv::imshow( "Canny", border_aux );
#endif

        //Find confining convex hull (Note: used border_copy as findContours modifies the image)
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        cv::Mat border_copy(border_aux.size(), CV_8UC1);
        border_aux.copyTo(border_copy);

#ifdef __OPENCV3__
        cv::findContours(border_copy, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
#else
        cv::findContours(border_copy, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
#endif


#ifdef RSEG_DEBUG
/*        cv::Scalar color = cv::Scalar( 255, 255, 255);
        cv::Mat drawing = cv::Mat::zeros( border_aux.size(), CV_8UC3);
        for(i = 0; i< contours.size(); i++ )
            cv::drawContours( drawing, contours, i, color, 1, 8, hierarchy, 0, cv::Point() );
        cv::namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
        cv::imshow( "Contours", drawing );*/
#endif

        //One contour to confine all detected contours
        std::vector<cv::Point> big_contour;
        std::vector<cv::Point> hull;
        if(contours.size() > 0) {
            //Group found contours in one big contour
            for(i=0;i<contours.size(); i++) {
                if(hierarchy[i][2] < 0) { // No parent, so it's a parent
                    if(big_contour.empty())
                        big_contour = contours[i];
                    else
                        big_contour.insert( big_contour.end(), contours[i].begin(), contours[i].end());
                }
            }
            //Get initial convex hull
            cv::convexHull( big_contour, hull, false );

#ifdef RSEG_DEBUG
            //Print contour and hull
            /*std::cout << "Hull" << std::endl;
            for(i=0; i<hull.size(); i++)
                std::cout << hull[i].x << "," << hull[i].y << std::endl;
            cv::Mat drawing2 = cv::Mat::zeros( border_aux.size(), CV_8UC3);
            cv::Scalar color = cv::Scalar( 255, 0, 255 );
            std::vector<std::vector<cv::Point> > drawc, drawh;
            drawc.push_back(big_contour);
            drawh.push_back(hull);
            color = cv::Scalar( 0, 0, 255 );
            cv::drawContours( drawing2, drawh, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
            color = cv::Scalar( 255, 0, 255 );
            cv::drawContours( drawing2, drawc, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
            cv::namedWindow( "Contour and Hull", CV_WINDOW_AUTOSIZE );
            cv::imshow( "Contour and Hull", drawing2 );*/
#endif
        } else {
            m_data->hulls.push_back(newHull);
            continue;
        }
        if(hull.size() == 0) {
            m_data->hulls.push_back(newHull);
            continue;
        }

        //Confine current image to blob, and get inverted foreground mask
        cv::Mat caux(c, roi), aux2 = 255 - aux;

        //COLOR HISTOGRAM
        //Get YCrCb image
        cv::Mat c_yuvaux(c_yuv, roi);
#ifdef __OPENCV3__
        cv::cvtColor(caux, c_yuvaux, cv::COLOR_BGR2YCrCb);
#else
        cv::cvtColor(caux, c_yuvaux, CV_BGR2YCrCb);
#endif

        //Calculate foreground and background chroma histograms
        cv::MatND hist, hist2;
        //Foreground
        cv::calcHist( &c_yuvaux, 1, channels, aux, // do not use mask
                  hist, 2, histSize, ranges,
                 true, // the histogram is uniform
                 false );
        maxVal=0;
        cv::minMaxLoc(hist, 0, &maxVal, 0, 0);
        hist = hist/maxVal;
        //Background
        cv::calcHist( &c_yuvaux, 1, channels, aux2, // do not use mask
                    hist2, 2, histSize, ranges,
                   true, // the histogram is uniform
                   false );
        maxVal=0;
        cv::minMaxLoc(hist2, 0, &maxVal, 0, 0);
        hist2 = hist2/maxVal;

        //Check correlation between color histograms:
        cv::MatND pixhist;
        for(i = i0; i < i0 + h0; i++ ) {
            for(j = j0; j < j0 + w0; j++ ) {
                //Just for points inside the convex hull and a little offset
                if(cv::pointPolygonTest(hull, cv::Point2f(j-j0,i-i0), true) > - m_hullOffset) {
                    if(f.data[i*bl+j]) { //Movement point
                       //Set augmented segmentation image
                       r.data[i*bl2+3*j] = r.data[i*bl2+3*j+1] = r.data[i*bl2+3*j+2] = 255; //White
                    } else { //Non-movement
                        //Check neighborhood for movement.
                       if(    j + w_size/2 >= w || i + w_size/2 >= h
                           || j - w_size/2 < 0  || i - w_size/2 < 0 )
                            continue;
                        wroi.x = j - w_size/2;
                        wroi.y = i - w_size/2;
                        if(movementFound(f, w_size, i, j, roi)) {
                            //Generate local histogram for comparison
                            cv::Mat c_yuvpix(c_yuv, wroi);
                            cv::calcHist( &c_yuvpix, 1, channels, cv::Mat(), // do not use mask
                                        pixhist, 2, histSize, ranges,
                                        true, // the histogram is uniform
                                        false );
                            maxVal = 0;
                            cv::minMaxLoc(pixhist, 0, &maxVal, 0, 0);
                            pixhist = pixhist/maxVal;

                            //Decide if background or foreground, comparing histograms
                            if(histogramDistance(hist,pixhist) < histogramDistance(hist2,pixhist)) {
                                r.data[i*bl2+3*j] = 255; //Red
                            }
                        }
                    }
                }
            }
        }
        //Integrate results with original mask
        for(i = i0; i < i0 + h0; i++ )
            for(j = j0; j < j0 + w0; j++ )
                if(f0.data[i*bl+j] != 0 || r.data[i*bl2+3*j] != 0 || r.data[i*bl2+3*j+1] != 0 || r.data[i*bl2+3*j+2] != 0) {
                    f.data[i*bl+j] = 255;
                    if(f0.data[i*bl+j] != 0)
                        r.data[i*bl2+3*j] = r.data[i*bl2+3*j+1] = r.data[i*bl2+3*j+2] = 255;
                }
        //Opening and Closing
        cv::erode(aux, aux, element);
        cv::dilate(aux, aux, element,cv::Point(-1,-1),2);
        cv::erode(aux, aux, element);

        //Recalculate Convex Hull
        cv::Canny(aux,border_aux, 50,100, 3);
        contours.clear();
        hierarchy.clear();
        big_contour.clear();
        hull.clear();
#ifdef __OPENCV3__
        cv::findContours(border_aux, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
#else
        cv::findContours(border_aux, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
#endif

        for(i=0;i<contours.size(); i++) {
            if(hierarchy[i][2] < 0) { // No parent, so it's a parent
                if(big_contour.empty())
                    big_contour = contours[i];
                else
                    big_contour.insert( big_contour.end(), contours[i].begin(), contours[i].end());
            }
        }
        cv::convexHull( big_contour, hull, false );

        newHull->local_hull = hull;
        newHull->off_x = j0;
        newHull->off_y = i0;
        newHull->id = (*it)->mobile_id;

        //Get principal/minor axis
        std::vector<cv::Point2f> data_aux(h0*w0);
        float mean_x = 0, mean_y = 0;
        int count = 0;

        for(i=0; i<h0; i++)
            for(j=0; j<w0; j++)
                if(cv::pointPolygonTest(hull, cv::Point2f(j, i), true) > - m_hullOffset) {
                    data_aux[count++] = cv::Point2f(j, i);
                    mean_x += j;
                    mean_y += i;
                }
        //data_aux.resize(count);
        //cv::Mat data(2, count, CV_32FC1, &data_aux.front());
        cv::Mat data(2, count, CV_32FC1);
        cv::Point2f x;
        for(i=0; i<count; i++) {
            data.at<float>(0,i) = data_aux[i].x;
            data.at<float>(1,i) = data_aux[i].y;
        }

        //cv::Mat data();
        mean_x /= count;
        mean_y /= count;
        cv::Mat mean(2, 1, CV_32FC1);
        mean.at<float>(0) = mean_x;
        mean.at<float>(1) = mean_y;

        //2. perform PCA
#ifdef __OPENCV3__
        cv::PCA pca(data, mean, cv::PCA::DATA_AS_COL, maxComponents);
#else
        cv::PCA pca(data, mean, CV_PCA_DATA_AS_COL, maxComponents);
#endif
        //result is contained in pca.eigenvectors (as row vectors)
        //std::cout << pca.eigenvectors << std::endl;

        //3. get angle of principal axis
        float dx = pca.eigenvectors.at<float>(0, 0),
              dy = pca.eigenvectors.at<float>(0, 1),
              scale = 40.0;
        cv::Point3f rline;
        cv::Point2f r1, r2;

        //Get line general form from principal component
        getGeneralLineForm(cv::Point2f(mean_x, mean_y),
                           cv::Point2f(mean_x + dx*scale, mean_y + dy*scale),
                           rline);
        //Get segment from line
        int n1, n2;
        getContourToLineIntersection(hull, rline, r1, r2, &n1, &n2);

        //Get pixel intersections for normals
        std::vector< segment2D<float> > &segs = newHull->segs;
        std::vector< segment2D<float> > &hull_segs = newHull->hull_segs;

        //Get segments of movement normal to principal axis. Also reorders r1 and r2 in
        //coherence with segments order
        getNormalIntersections(aux, roi, hull, r1, r2, n1, n2, dx, dy, segs, hull_segs);

        newHull->axis1 = r1;
        newHull->axis2 = r2;


        //Set new representation
        m_data->hulls.push_back(newHull);

        //Get the pixel distance function
        std::vector<float> dfunction;
        //dfunction.resize((int)D_axis + 1); //

                //First and last are zero for sure (axis intersects contour).
        //dfunction[0] = 0.0;
        //dfunction[(int)D_axis] = 0.0;
        //for

#ifdef RSEG_DEBUG
        /*std::cout << "Final Hull" << std::endl;
        for(i=0; i<hull.size(); i++)
            std::cout << i << " : " << hull[i].x << " ; " << hull[i].y << std::endl;
*/
/*        std::cout << "Distances" << std::endl;
        for(i=0; i<segs.size(); i++) {
            double dx = segs[i].first.x - segs[i].last.x;
            double dy = segs[i].first.y - segs[i].last.y;
            std::cout << i << " : " << sqrt(dx*dx+dy*dy) << std::endl;
        }
        color = cv::Scalar( 0, 255, 255 );
        std::vector<std::vector<cv::Point> > drawc;
        drawc.push_back(hull);
        cv::Mat raux(r, roi);
        cv::drawContours( raux, drawc, 0, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
        color = cv::Scalar( 0, 255, 0 );
        cv::line(raux, r1, r2, color);
        cv::line(raux, cv::Point(mean_x - dx*scale, mean_y - dy*scale),
                       cv::Point(mean_x + dx*scale, mean_y + dy*scale), color);
        cv::namedWindow( "Final", CV_WINDOW_AUTOSIZE );
        cv::imshow( "Final", raux );*/
#endif
    }
    //Set datapool images
    memcpy(fg_p, f.data, h*bl);
    memcpy(m_data->rFgImage->bits(), r.data, h*bl2);

}
Example #24
const char* PerformArrayPCA(FILE* coordinatefile, FILE* pcfile)
{
  int i = 0;
  int j = 0;
  const int nmin = min(_rows,_columns);
  double** u = malloc(_columns*sizeof(double*));
  double** v = malloc(nmin*sizeof(double*));
  double* w = malloc(nmin*sizeof(double));
  double* m = malloc(_rows*sizeof(double));
  if (u)
  { for (i = 0; i < _columns; i++)
    { u[i] = malloc(_rows*sizeof(double));
      if (!u[i]) break;
    }
  }
  if (v)
  { for (j = 0; j < nmin; j++)
    { v[j] = malloc(nmin*sizeof(double));
      if (!v[j]) break;
    }
  }
  if (!u || !v || !w || !m || i < _columns || j < nmin)
  { if (u)
    { while (i--) free(u[i]);
      free(u);
    }
    if (v)
    { while (j--) free(v[j]);
      free(v);
    }
    if (w) free(w);
    if (m) free(m);
    return "Insufficient Memory for PCA calculation";
  }
  for (j = 0; j < _rows; j++)
  { double value;
    m[j] = 0.0;
    for (i = 0; i < _columns; i++)
    { value = _data[j][i];
      u[i][j] = value;
      m[j] += value;
    }
    m[j] /= _columns;
    for (i = 0; i < _columns; i++) u[i][j] -= m[j];
  }
  pca(_columns, _rows, u, v, w);
  fprintf(coordinatefile, "EIGVALUE");
  for (j=0; j < _columns; j++)
    fprintf(coordinatefile, "\t%s", _arrayname[j]);
  putc('\n', coordinatefile);
  fprintf(coordinatefile, "EWEIGHT");
  for (j=0; j < _columns; j++)
    fprintf(coordinatefile, "\t%f", _arrayweight[j]);
  putc('\n', coordinatefile);
  fprintf(pcfile, "%s\tNAME\tMEAN", _uniqID);
  for (j=0; j < nmin; j++)
    fprintf(pcfile, "\t%f", w[j]);
  putc('\n', pcfile);
  if (_rows>_columns)
  { for (i = 0; i < nmin; i++)
    { fprintf(coordinatefile, "%f", w[i]);
      for (j=0; j<_columns; j++)
        fprintf(coordinatefile, "\t%f", v[j][i]);
      putc('\n', coordinatefile);
    }
    for (i = 0; i < _rows; i++)
    { fprintf(pcfile, "%s\t",_geneuniqID[i]);
      if (_genename[i]) fputs(_genename[i], pcfile);
      else fputs(_geneuniqID[i], pcfile);
      fprintf(pcfile, "\t%f", m[i]);
      for (j=0; j<_columns; j++)
        fprintf(pcfile, "\t%f", u[j][i]);
      putc('\n', pcfile);
    }
  }
  else /* _rows <= _columns */
  { for (i=0; i<_rows; i++)
    { fprintf(coordinatefile, "%f", w[i]);
      for (j=0; j<_columns; j++)
        fprintf(coordinatefile, "\t%f", u[j][i]);
      putc('\n', coordinatefile);
    }
    for (i = 0; i < _rows; i++)
    { fprintf(pcfile, "%s\t",_geneuniqID[i]);
      if (_genename[i]) fputs(_genename[i], pcfile);
      else fputs(_geneuniqID[i], pcfile);
      fprintf(pcfile, "\t%f", m[i]);
      for (j=0; j<nmin; j++)
        fprintf(pcfile, "\t%f", v[j][i]);
      putc('\n', pcfile);
    }
  }
  for (i = 0; i < _columns; i++) free(u[i]);
  for (i = 0; i < nmin; i++) free(v[i]);
  free(u);
  free(v);
  free(w); 
  free(m);
  return NULL;
}
Pole PolesExtractor::foreground2pole(cv::Mat &fg_mask) {
    cv::PCA pca;

    // create the points x,y using the foreground pixels
    std::vector< std::vector<double> > foreground_pixels;
    for (int i = 0; i < fg_mask.rows; ++i) {
        for (int j = 0; j < fg_mask.cols; ++j) {
            if (fg_mask.at<uchar>(i,j) > 0) {
                std::vector<double> pixel(2, 0);
                pixel[0] = i;
                pixel[1] = j;
                foreground_pixels.push_back(pixel);
            }
        }
    }
    
    cv::Mat pca_points(foreground_pixels.size(), foreground_pixels.at(0).size(), CV_64FC1);
    for(int i=0; i< pca_points.rows; ++i) {
        for(int j=0; j< pca_points.cols; ++j) {
            pca_points.at<double>(i, j) = foreground_pixels.at(i).at(j);
        }
    }

    pca(pca_points, cv::noArray(), CV_PCA_DATA_AS_ROW);
    
    cv::Point3f center;
    center.x = fg_mask.cols/2.0;
    center.y = fg_mask.rows/2.0;
    center.z = 1.0;

    cv::Point3f next_point;
    next_point.x = center.x + pca.eigenvectors.at<double>(0, 1)*sqrt(pca.eigenvalues.at<double>(0));
    next_point.y = center.y + pca.eigenvectors.at<double>(1, 1)*sqrt(pca.eigenvalues.at<double>(0));
    next_point.z = 1.0;

    cv::Point3f line;
    line = center.cross(next_point);

    cv::Point2f head_point(-1, -1);
    cv::Point2f feet_point(-1, -1);
    for (int y = 0; y < fg_mask.rows; ++y) {
        // using the line see the point in x
        double fx = ( -line.y*y -line.z) / line.x;
        int x = (int)fx;

        // this loop is running from the head to toes, so if the head was not
        // defined, set the value (the first non zero)
        if (head_point.x == -1 && head_point.y == -1 && fg_mask.at<uchar>(y,x) > 0) {
            head_point.x = x; head_point.y = y;
        }
        if (fg_mask.at<uchar>(y,x) > 0) {
            feet_point.x = x; feet_point.y = y;
        }
    }

    Pole resulting_pole;
    resulting_pole.head_point = head_point;
    resulting_pole.feet_point = feet_point;

    return resulting_pole;
}
Example #26
UT_Error IE_Imp_StarOffice::_loadFile(GsfInput * input)
{
	try {
		UT_DEBUGMSG(("SDW: Starting import\n"));
		mOle = GSF_INFILE (gsf_infile_msole_new(input, NULL));
		if (!mOle)
			return UT_IE_BOGUSDOCUMENT;

		// firstly, load metadata
		SDWDocInfo::load(mOle, getDoc());

		mDocStream = gsf_infile_child_by_name(mOle, "StarWriterDocument");
		if (!mDocStream)
			return UT_IE_BOGUSDOCUMENT;

		gsf_off_t size = gsf_input_size(mDocStream);

		if (!appendStrux(PTX_Section, PP_NOPROPS))
			return UT_IE_NOMEMORY;

		UT_DEBUGMSG(("SDW: Attempting to load DocHdr...\n"));
		mDocHdr.load(mDocStream);
		UT_DEBUGMSG(("SDW: ...success\n"));

		// Ask for and verify the password
		if (mDocHdr.cryptor) {
			if (!mDocHdr.cryptor->SetPassword(GetPassword().c_str())) {
				UT_DEBUGMSG(("SDW: Wrong password\n"));
				return UT_IE_PROTECTED;
			}
		}

		// do the actual reading
		char type;
		bool done = false;
		UT_uint32 recSize;
		while (!done) {
			if (gsf_input_tell(mDocStream) == size)
				break;
			readChar(mDocStream, type);
			gsf_off_t eor;
			readRecSize(mDocStream, recSize, &eor);

			switch (type) {
				case SWG_CONTENTS: {
					gsf_off_t flagsEnd = 0;
					UT_uint32 nNodes;
					// sw/source/core/sw3io/sw3sectn.cxx#L129
					if (mDocHdr.nVersion >= SWG_LAYFRAMES) {
						UT_uint8 flags;
						readFlagRec(mDocStream, flags, &flagsEnd);
					}
					if (mDocHdr.nVersion >= SWG_LONGIDX)
						streamRead(mDocStream, nNodes);
					else {
						if (mDocHdr.nVersion >= SWG_LAYFRAMES) {
							UT_uint16 sectidDummy;
							streamRead(mDocStream, sectidDummy);
						}
						UT_uint16 nodes16;
						streamRead(mDocStream, nodes16);
						nNodes = (UT_uint32)nodes16;
					}
					if (flagsEnd) {
						UT_ASSERT(flagsEnd >= gsf_input_tell(mDocStream));
						if (gsf_input_tell(mDocStream) != flagsEnd) {
							UT_DEBUGMSG(("SDW: have not read all flags\n"));
							if (gsf_input_seek(mDocStream, flagsEnd, G_SEEK_SET))
								return UT_IE_BOGUSDOCUMENT;
						}
					}
					bool done2 = false;
					UT_uint32 size2;
					while (!done2) {
						readChar(mDocStream, type);
						gsf_off_t eor2;
						readRecSize(mDocStream, size2, &eor2);

						switch (type) {
							case SWG_TEXTNODE: { // sw/source/core/sw3io/sw3nodes.cxx#L788
								UT_DEBUGMSG(("SDW: Found Textnode! (start at 0x%08llX end at 0x%08llX)\n", 
											 (long long)gsf_input_tell(mDocStream), 
											 (long long)eor2));
								UT_uint8 flags;
								gsf_off_t newPos;
								readFlagRec(mDocStream, flags, &newPos);
								// XXX check flags
								if (gsf_input_seek(mDocStream, newPos, G_SEEK_SET))
									return UT_IE_BOGUSDOCUMENT;

								// Read the actual text
								UT_UCS4Char* str;
								readByteString(mDocStream, str);
								UT_UCS4String textNode(str);
								free(str);
								UT_DEBUGMSG(("SDW: ...length=%zu contents are: |%s|\n", textNode.length(), textNode.utf8_str()));

								// now get the attributes
								UT_String attrs;
								UT_String pAttrs;
								UT_Vector charAttributes;
								while (gsf_input_tell(mDocStream) < eor2) {
									char attVal;
									streamRead(mDocStream, attVal);
									UT_uint32 attSize;
									gsf_off_t eoa; // end of attribute
									readRecSize(mDocStream, attSize, &eoa);
									if (attVal == SWG_ATTRIBUTE) {
										TextAttr* a = new TextAttr;
										streamRead(mDocStream, *a, eoa);
										UT_DEBUGMSG(("SDW: ...found text-sub-node, which=0x%x, ver=0x%x, start=%u, end=%u - data:%s len:%llu data is:",
													 a->which, a->ver, a->start,
													 a->end, a->data?"Yes":"No",
													 (long long unsigned)a->dataLen));
#ifdef DEBUG
										hexdump(a->data, a->dataLen);
                    putc('\n', stderr);
#endif
										charAttributes.addItem(a);
									}
									else if (attVal == SWG_ATTRSET) {
									  // bah, yet another loop
										UT_DEBUGMSG(("SDW: ...paragraph attributes found\n"));
										while (gsf_input_tell(mDocStream) < eoa) {
											// reusing attVal and attSize
											streamRead(mDocStream, attVal);
											gsf_off_t eoa2; // end of attribute
											readRecSize(mDocStream, attSize, &eoa2);
											if (attVal == SWG_ATTRIBUTE) {
												TextAttr a;
												streamRead(mDocStream, a, eoa2);
												if (!a.attrVal.empty()) {
													if (a.isPara)
														UT_String_setProperty(pAttrs, a.attrName, a.attrVal);
													else
														UT_String_setProperty(attrs, a.attrName, a.attrVal);
												}
												UT_DEBUGMSG(("SDW: ......found paragraph attr, which=0x%x, ver=0x%x, start=%u, end=%u (string now %s) Data:%s Len=%lld Data:", a.which, a.ver, (a.startSet?a.start:0), (a.endSet?a.end:0), attrs.c_str(), (a.data ? "Yes" : "No"), (long long)a.dataLen));
#ifdef DEBUG
												hexdump(a.data, a.dataLen);
												putc('\n', stderr);
#endif
											}
											if (gsf_input_seek(mDocStream, eoa2, G_SEEK_SET))
												return UT_IE_BOGUSDOCUMENT;
										}
									}
									else {
										UT_DEBUGMSG(("SDW: ...unknown attribute '%c' found (start=%" GSF_OFF_T_FORMAT " end=%" GSF_OFF_T_FORMAT ")\n", attVal, gsf_input_tell(mDocStream), eoa));
									}
									if (gsf_input_seek(mDocStream, eoa, G_SEEK_SET))
										return UT_IE_BOGUSDOCUMENT;
								}

								PP_PropertyVector attributes = {
									"props",
									pAttrs.c_str()
								};
								// first, insert the paragraph
								if (!appendStrux(PTX_Block, attributes))
									return UT_IE_NOMEMORY;

								UT_String pca(attrs); // character attributes for the whole paragraph
								// now insert the spans of text
								UT_uint32 len = textNode.length();
								UT_uint32 lastInsPos = 0;
								for (UT_uint32 i = 1; i < len; i++) {
									bool doInsert = false; // whether there was an attribute change
									for (UT_sint32 j = 0; j < charAttributes.getItemCount(); j++) {
										const TextAttr* a = reinterpret_cast<const TextAttr*>(charAttributes[j]);
										// clear the last attribute, if set
										if (a->endSet && a->end == (i - 1)) {
											if (a->isOff) {
												UT_String propval = UT_String_getPropVal(pca, a->attrName);
												UT_String_setProperty(attrs, a->attrName, propval);
											}
											else
												UT_String_removeProperty(attrs, a->attrName);
										}

										// now set new attribute, if needed
										if (a->startSet && a->start == (i - 1)) {
											if (a->isPara)
												UT_String_setProperty(pAttrs, a->attrName, a->attrVal);
											else if (a->isOff)
												UT_String_removeProperty(attrs, a->attrName);
											else
												UT_String_setProperty(attrs, a->attrName, a->attrVal);
										}

										// insert if this is the last character, or if there was a format change
										if ((a->endSet && a->end == i) || (a->startSet && a->start == i))
											doInsert = true;
									}
									if (doInsert || i == (len - 1)) {
										attributes[1] = attrs.c_str();
										UT_DEBUGMSG(("SDW: Going to appendFmt with %s\n", attributes[1].c_str()));
										if (!appendFmt(attributes))
											return UT_IE_NOMEMORY;
										UT_DEBUGMSG(("SDW: About to insert %u-%u\n", lastInsPos, i));
										size_t spanLen = i - lastInsPos;
										if (i == (len - 1)) spanLen++;
										UT_UCS4String span = textNode.substr(lastInsPos, spanLen);
										appendSpan(span.ucs4_str(), spanLen);
										lastInsPos = i;
									}
								}

								UT_VECTOR_PURGEALL(TextAttr*, charAttributes);
								break;

							}
							case SWG_JOBSETUP: {
								// flags are apparently unused here. no idea why they are there.
								gsf_off_t newpos;
								UT_uint8 flags;
								readFlagRec(mDocStream, flags, &newpos);
								if (gsf_input_seek(mDocStream, newpos, G_SEEK_SET))
									return UT_IE_BOGUSDOCUMENT;
								UT_uint16 len, system;
								streamRead(mDocStream, len);
								streamRead(mDocStream, system);
								char printerName[64];
								streamRead(mDocStream, printerName, 64);
								char deviceName[32], portName[32], driverName[32];
								streamRead(mDocStream, deviceName, 32);
								streamRead(mDocStream, portName, 32);
								streamRead(mDocStream, driverName, 32);
								UT_DEBUGMSG(("SDW: Jobsetup: len %u sys 0x%x printer |%.64s| device |%.32s| port |%.32s| driver |%.32s|\n", len, system, printerName, deviceName, portName, driverName));

								if (system == JOBSET_FILE364_SYSTEM || system == JOBSET_FILE605_SYSTEM) {
									UT_uint16 len2, system2;
									streamRead(mDocStream, len2);
									streamRead(mDocStream, system2);
									UT_uint32 ddl; // driver data length
									streamRead(mDocStream, ddl);
									// now the interesting data
									UT_uint16 orient; // 0=portrait 1=landscape
									streamRead(mDocStream, orient);
									UT_uint16 paperBin;
									streamRead(mDocStream, paperBin);
									UT_uint16 paperFormat;
									streamRead(mDocStream, paperFormat);
									UT_uint32 width, height;
									streamRead(mDocStream, width);
									streamRead(mDocStream, height);
									UT_DEBUGMSG(("SDW: orient %u bin %u format %u width %u height %u\n", orient, paperBin, paperFormat, width, height));
									// rest of the data is ignored, seems to be printer specific anyway.
									// Use A4, Portrait by default
									PP_PropertyVector attributes = {
										"pagetype", "a4", // A4/Letter/...
										"orientation", "portrait",
										"width", "210",
										"height", "297",
										"units", "mm"
									};
									const char* sdwPaperToAbi[] = {
										"A3",
										"A4",
										"A5",
										"B4",
										"B5",
										"Letter",
										"Legal",
										"Tabloid/Ledger",
										"Custom"
									};
									if (paperFormat < sizeof(sdwPaperToAbi)/sizeof(*sdwPaperToAbi)) {
										attributes[1] = sdwPaperToAbi[paperFormat];
									}
									const char* sdwOrientToAbi[] = {
										"portrait",
										"landscape"
									};
									if (orient < sizeof(sdwOrientToAbi)/sizeof(*sdwOrientToAbi)) {
										attributes[3] = sdwOrientToAbi[orient];
									}
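									// the file stores width/height in 1/100 mm, hence the division by 100 below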
									attributes[5] = UT_std_string_sprintf("%f", static_cast<double>(width)/100);
									attributes[7] = UT_std_string_sprintf("%f", static_cast<double>(height)/100);

									getDoc()->setPageSizeFromFile(attributes);
								}
								break;

							}
							case SWG_EOF:
								done2 = true;
								break;
							default:
								UT_DEBUGMSG(("SDW: SWG_CONTENT: Skipping %u bytes for record type '%c' (starting at 0x%08llX)\n",
											 size2, type,
											 (long long)gsf_input_tell(mDocStream)));
						}
						if (gsf_input_seek(mDocStream, eor2, G_SEEK_SET))
							return UT_IE_BOGUSDOCUMENT;
					}
					break;
				}
				case SWG_STRINGPOOL:
				{
					if (mDocHdr.nVersion <= SWG_POOLIDS) {
						UT_ASSERT_HARMLESS(UT_NOT_IMPLEMENTED);
						break;
					}
					UT_uint8 encoding;
					streamRead(mDocStream, encoding);
					UT_iconv_t cd = findConverter(encoding);
					if (!UT_iconv_isValid(cd))
						throw UT_IE_IMPORTERROR;
					UT_uint16 count;
					streamRead(mDocStream, count);
					while (count--) {
						UT_uint16 id;
						streamRead(mDocStream, id);
						char* str;
						UT_uint16 len;
						::readByteString(mDocStream, str, &len);
						if (id == IDX_NOCONV_FF) {
							UT_ASSERT_HARMLESS(UT_NOT_IMPLEMENTED);
						}
						// FIXME: find a way to not have to copy and free 
						// the result of UT_convert_cd.... --hub
						UT_DEBUGMSG(("SDW: StringPool: found 0x%04x <-> %.*s\n", id, len, str));
						UT_UCS4Char* convertedString = reinterpret_cast<UT_UCS4Char*>(UT_convert_cd(str, len + 1, cd, NULL, NULL));
						mStringPool.insert(stringpool_map::value_type(id, convertedString));
						FREEP(convertedString);
						delete [] str;
					}
					UT_iconv_close(cd);
					break;
				}
				case SWG_COMMENT: // skip over comments
					break;
				case SWG_EOF:
					done = true;
					break;
				default:
					UT_DEBUGMSG(("SDW: Skipping %u bytes for record type '%c' (starting at 0x%08llX)\n", recSize, type, (long long)gsf_input_tell(mDocStream)));
			}
			// Seek to the end of the record, in case it wasn't read completely
			if (gsf_input_seek(mDocStream, eor, G_SEEK_SET))
				return UT_IE_BOGUSDOCUMENT;
		}

		UT_DEBUGMSG(("SDW: Done\n"));

		return UT_OK;
	}
	catch(UT_Error e) {
		UT_DEBUGMSG(("SDW: error %d\n", e));
		return e;
	}
	catch(...) {
		UT_DEBUGMSG(("SDW: Unknown error\n"));
		return UT_IE_BOGUSDOCUMENT;
	}
}
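// The importer above walks the StarWriter stream as a flat sequence of records:
// read a type byte and a record size, dispatch on the type, and always seek to the
// record's end offset so an unknown or only partially read record cannot
// desynchronize the parser. Below is a minimal, self-contained sketch of that
// pattern over std::istream; the 1-byte type + 3-byte size header and the record
// type values are assumptions for illustration, not the exact StarWriter layout.
#include <cstdint>
#include <istream>

static bool readRecordHeader(std::istream& in, uint8_t& type, std::streamoff& endOfRecord)
{
	unsigned char hdr[4];
	if (!in.read(reinterpret_cast<char*>(hdr), 4))
		return false;
	type = hdr[0];
	// assumed encoding: size in the low three bytes, counted from the start of the record
	const uint32_t size = hdr[1] | (hdr[2] << 8) | (hdr[3] << 16);
	endOfRecord = static_cast<std::streamoff>(in.tellg()) + size - 4;
	return true;
}

static void walkRecords(std::istream& in)
{
	uint8_t type;
	std::streamoff endOfRecord;
	while (readRecordHeader(in, type, endOfRecord)) {
		switch (type) {
			case 'T': /* parse the record body here */ break;
			case 'Z': return;   // assumed end-of-stream record
			default:  break;    // unknown record type: just skip it
		}
		// Seek to the end of the record even if the case above read only part of it.
		in.seekg(endOfRecord, std::ios::beg);
	}
}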
std::vector<PlanarHorizontalSegment> PlaneExtractor::GetPlanesRANSAC(cv::Mat pointCloud, std::vector<cv::Point2i> indexes, int minPointsForPlane, double distThresholdForPlane, int maxIterations, cv::Mat& out_planesMask)
{
	
	float distToPlane; 
	cv::Point3f xyzPoint; 
	double dist1;
	double dist2;
	double dist3;

	std::vector<PlanarHorizontalSegment> horizontalPlanes; 
	std::vector< std::vector< cv::Point2i > > indexesPlanes; 

	out_planesMask = cv::Mat::zeros( pointCloud.rows, pointCloud.cols, CV_8UC1); 

	int iterationsCnt = 0; 
	while( iterationsCnt++ < maxIterations && indexes.size() > (size_t)minPointsForPlane )
	{
		// Getting Random points 
		cv::Point3f p1 = pointCloud.at<cv::Vec3f>( indexes[rand() % indexes.size() ] );  
		cv::Point3f p2 = pointCloud.at<cv::Vec3f>( indexes[rand() % indexes.size() ] ); 
		cv::Point3f p3 = pointCloud.at<cv::Vec3f>( indexes[rand() % indexes.size() ] ); 

		// Check that the points are valid for defining a plane, e.g. not collinear
		if( !Plane3D::AreValidPointsForPlane( p1, p2, p3 ) )
			continue;
		
		// Reject candidate points that are too close to each other
		dist1 = cv::norm(p1 - p2);
		dist2 = cv::norm(p1 - p3); 
		dist3 = cv::norm(p2 - p3); 
		if(dist1 < 0.2 || dist2 < 0.2 || dist3 < 0.2 )
			continue; 

		// Calculating candidate plane
		Plane3D candidatePlane( p1, p2, p3 ); 

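		// Keep only near-horizontal candidates: the plane normal must be almost parallel to the Z axis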
		if( std::abs( candidatePlane.GetNormal().z ) < 0.99 ){			
			continue; 
		}

		// Checking distance to candidate plane of points
		std::vector< cv::Point3f > inliers; 
		inliers.reserve( indexes.size() ); 

		for(size_t i=0; i<indexes.size(); i++ ){
			
			xyzPoint = pointCloud.at<cv::Vec3f>(indexes[i]); 
			distToPlane = candidatePlane.DistanceToPoint( xyzPoint );
			
			if( distToPlane < distThresholdForPlane )
				inliers.push_back( xyzPoint ); 
		}

		// If there are too few inliers, discard this candidate plane
		if( inliers.size() < (size_t)minPointsForPlane )
			continue;

		// Refine the plane with a PCA over the inliers
		cv::PCA pca( cv::Mat(inliers).reshape(1), cv::Mat(), CV_PCA_DATA_AS_ROW);

		// Create a refined plane from a normal (the eigenvector with the lowest eigenvalue) and a point (the mean)
		cv::Point3f pcaNormal(pca.eigenvectors.at<float>(2,0), pca.eigenvectors.at<float>(2,1), pca.eigenvectors.at<float>(2,2)); 
		cv::Point3f pcaPoint(pca.mean.at<float>(0,0), pca.mean.at<float>(0,1), pca.mean.at<float>(0,2)); 		
		
		Plane3D refinedPlane(pcaNormal, pcaPoint); 

		// Check for new inliers against the refined plane.
		// The vectors below are preallocated copies instead of erasing elements
		// from `indexes` in place: this trades memory for speed.
		std::vector< cv::Point2i > newIndexes;
		newIndexes.reserve( indexes.size() );
		std::vector< cv::Point3f > newInliers;
		newInliers.reserve( indexes.size() );
		std::vector< cv::Point2f > pointsXY_forConvexHull;
		pointsXY_forConvexHull.reserve( indexes.size() );
		std::vector< cv::Point2i > indexPlane;
		indexPlane.reserve( indexes.size() );

		for(size_t i=0; i<indexes.size(); i++ ){
			
			xyzPoint = pointCloud.at<cv::Vec3f>(indexes[i]); 
			distToPlane = refinedPlane.DistanceToPoint( xyzPoint );

			if( distToPlane < distThresholdForPlane ){
				newInliers.push_back( xyzPoint ); 
				pointsXY_forConvexHull.push_back( cv::Point2f( xyzPoint.x , xyzPoint.y ) );
				indexPlane.push_back( indexes[i] );

				out_planesMask.at<uchar>( indexes[i] ) = 255; 
			}
			else
				newIndexes.push_back(indexes[i]);
		}

		indexes = newIndexes; 
		indexesPlanes.push_back( indexPlane ); 

		// Compute the convex hull in XY (dropping Z is valid because the plane is horizontal).
		// The call below is currently commented out, so convexHull2D stays empty.
		std::vector< cv::Point2f > convexHull2D;
		//cv::convexHull( pointsXY_forConvexHull , convexHull2D);
		// Create the horizontal planar segment
		PlanarHorizontalSegment ps( newInliers, refinedPlane, pca, convexHull2D, indexPlane); 
		// Adding to vector to return
		horizontalPlanes.push_back( ps );
	}

	return horizontalPlanes; 
}
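// Standalone sketch of the PCA refinement step used in GetPlanesRANSAC above: for
// row-ordered 3-D points, pca.mean is a point on the best-fit plane and the
// eigenvector with the smallest eigenvalue (the last row of pca.eigenvectors) is
// the plane normal. fitPlanePCA is a hypothetical helper written for illustration;
// it is not part of PlaneExtractor.
#include <opencv2/core/core.hpp>
#include <vector>

static void fitPlanePCA(const std::vector<cv::Point3f>& points,
						cv::Point3f& normal, cv::Point3f& centroid)
{
	// N x 3 single-channel matrix, one point per row
	cv::Mat data = cv::Mat(points).reshape(1);
	cv::PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW);

	centroid = cv::Point3f(pca.mean.at<float>(0, 0),
						   pca.mean.at<float>(0, 1),
						   pca.mean.at<float>(0, 2));
	// eigenvectors are sorted by decreasing eigenvalue, so row 2 spans the thinnest direction
	normal = cv::Point3f(pca.eigenvectors.at<float>(2, 0),
						 pca.eigenvectors.at<float>(2, 1),
						 pca.eigenvectors.at<float>(2, 2));
}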
Example n. 28
double test(cv::Mat &vocabulary, void *src)
{
	// Test	
	std::vector<BOWImg> images;
	conf.max_num = conf.max_num * 2;
	
	std::cout<<"--->Loading testing images ... "<<std::endl;
	int numImages = imgRead(images);
	std::cout<<"    "<<numImages<<" images loaded."<<std::endl;
	if(numImages < 0)
		return -1;
		
	printf("--->Extracting %s features ...\n", conf.extractor.c_str());	
	features(images, conf.extractor, conf.detector);
	
	std::cout<<"--->Extracting BOW features ..."<<std::endl;
	bowFeatures(images, vocabulary, conf.extractor);
	
	cv::Mat rawData;
	for(std::vector<BOWImg>::iterator iter = images.begin();iter != images.end(); iter++)
		rawData.push_back(iter->BOWDescriptor);
		
	//PCA	
#ifdef _USE_PCA_
	const float factor = 1.0f; // fraction of the cluster count to keep as PCA components
	int maxComponentsNum = static_cast<int>(conf.numClusters * factor);
	cv::PCA pca(rawData, Mat(),CV_PCA_DATA_AS_ROW, maxComponentsNum);
	cv::Mat pcaData;
	for(int i = 0;i<rawData.rows;i++)
	{
		cv::Mat vec = rawData.row(i);
		cv::Mat coeffs = pca.project(vec);
		pcaData.push_back(coeffs);
	}	
	cv::Mat testData = pcaData;
#else
	cv::Mat testData = rawData;
#endif

	std::cout<<"--->Executing predictions ..."<<std::endl;
	cv::Mat output;
	double ac = 0;
	double ac_rate = 0;
	if(conf.classifier == "BP")
	{
		CvANN_MLP *classifier = (CvANN_MLP *)src;
		classifier->predict(testData,output);
		std::cout<<"--->Predict answer: "<<std::endl;
		for(int i = 0;i < output.rows;i++)
		{
			float *p = output.ptr<float>(i);
			int k = 0;
			float best = p[0];
			for(int j = 1;j < output.cols;j++)
			{
				if(p[j] > best)
				{
					best = p[j];
					k = j;
				}
			}
			std::cout<<"    "<<images[i].imgName<<" ---- "<<conf.classes[k]<<std::endl;
			if(images[i].label == k+1)
				ac++;
		}
		ac_rate = ac / static_cast<double>(output.rows);
	}
	else if(conf.classifier == "SVM")
	{
		CvSVM *classifier = (CvSVM *)src;
		classifier->predict(testData,output);
		std::cout<<"--->Predict answer: "<<std::endl;
		for(int i = 0;i < output.rows;i++)
		{
			int k = (int)output.ptr<float>()[i]-1;
			std::cout<<"    "<<images[i].imgName<<" ---- "<<conf.classes[k]<<std::endl;
			if(images[i].label == k+1)
				ac++;
		}
		ac_rate = ac / static_cast<double>(output.rows);
	}
	else {
		std::cout<<"--->Error: wrong classifier."<<std::endl;
	}
	return ac_rate;
}
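// Minimal sketch of the reduction done in the _USE_PCA_ branch of test() above,
// assuming `rawData` holds one BOW descriptor per row and `numComponents` is the
// number of dimensions to keep. cv::PCA::project also accepts the whole matrix at
// once, so the per-row projection loop is not strictly necessary.
#include <opencv2/core/core.hpp>

static cv::Mat reduceWithPCA(const cv::Mat& rawData, int numComponents)
{
	cv::PCA pca(rawData, cv::Mat(), CV_PCA_DATA_AS_ROW, numComponents);
	return pca.project(rawData); // one row of PCA coefficients per input row
}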
Example n. 29
//------------------------------------------------------------------------------
// Fisherfaces
//------------------------------------------------------------------------------
void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
    if(src.total() == 0) {
        String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
        CV_Error(Error::StsBadArg, error_message);
    } else if(_lbls.getMat().type() != CV_32SC1) {
        String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure data has correct size
    if(src.total() > 1) {
        for(int i = 1; i < static_cast<int>(src.total()); i++) {
            if(src.getMat(i-1).total() != src.getMat(i).total()) {
                String error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", src.getMat(i-1).total(), src.getMat(i).total());
                CV_Error(Error::StsUnsupportedFormat, error_message);
            }
        }
    }
    // get data
    Mat labels = _lbls.getMat();
    Mat data = asRowMatrix(src, CV_64FC1);
    // number of samples
    int N = data.rows;
    // make sure labels are passed in correct shape
    if(labels.total() != (size_t) N) {
        String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, labels.total());
        CV_Error(Error::StsBadArg, error_message);
    } else if(labels.rows != 1 && labels.cols != 1) {
        String error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%s, cols=%d.", labels.rows, labels.cols);
       CV_Error(Error::StsBadArg, error_message);
    }
    // clear existing model data
    _labels.release();
    _projections.clear();
    // safely copy from cv::Mat to std::vector
    std::vector<int> ll;
    for(unsigned int i = 0; i < labels.total(); i++) {
        ll.push_back(labels.at<int>(i));
    }
    // get the number of unique classes
    int C = (int) remove_dups(ll).size();
    // clip number of components to be a valid number
    if((_num_components <= 0) || (_num_components > (C-1)))
        _num_components = (C-1);
    // perform a PCA and keep (N-C) components
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, (N-C));
    // project the data and perform a LDA on it
    LDA lda(pca.project(data),labels, _num_components);
    // store the total mean vector
    _mean = pca.mean.reshape(1,1);
    // store labels
    _labels = labels.clone();
    // store the eigenvalues of the discriminants
    lda.eigenvalues().convertTo(_eigenvalues, CV_64FC1);
    // Now calculate the projection matrix as pca.eigenvectors * lda.eigenvectors.
    // Note: OpenCV stores the eigenvectors by row, so we need to transpose it!
    gemm(pca.eigenvectors, lda.eigenvectors(), 1.0, Mat(), 0.0, _eigenvectors, GEMM_1_T);
    // store the projections of the original data
    for(int sampleIdx = 0; sampleIdx < data.rows; sampleIdx++) {
        Mat p = LDA::subspaceProject(_eigenvectors, _mean, data.row(sampleIdx));
        _projections.push_back(p);
    }
}
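// Hedged sketch of how a prediction against the model trained above could look:
// project the query into the Fisherfaces subspace with the stored mean and the
// combined PCA*LDA eigenvectors, then return the label of the nearest stored
// projection. predictNearest is written for illustration only; it is not the
// OpenCV implementation of Fisherfaces::predict.
#include <opencv2/core.hpp>
#include <cfloat>
#include <vector>

static int predictNearest(const cv::Mat& query,
                          const cv::Mat& eigenvectors, const cv::Mat& mean,
                          const std::vector<cv::Mat>& projections, const cv::Mat& labels)
{
    // flatten to a single row and match the CV_64FC1 type used during training
    cv::Mat q;
    query.reshape(1, 1).convertTo(q, CV_64FC1);
    q = cv::LDA::subspaceProject(eigenvectors, mean, q);

    double bestDist = DBL_MAX;
    int bestLabel = -1;
    for (size_t i = 0; i < projections.size(); i++) {
        double d = cv::norm(projections[i], q, cv::NORM_L2);
        if (d < bestDist) {
            bestDist = d;
            bestLabel = labels.at<int>(static_cast<int>(i));
        }
    }
    return bestLabel;
}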
Example n. 30
/**
 * This function trains one of the classes of the given machine with the given data.
 * It computes either BIC projection matrices, or IEC mean and variance.
 *
 * @param  clazz    false for the intrapersonal class, true for the extrapersonal one.
 * @param  machine  The machine to be trained.
 * @param  differences  A set of (intra/extra)-personal difference vectors that should be trained.
 */
void bob::learn::em::BICTrainer::train_single(bool clazz, bob::learn::em::BICMachine& machine, const blitz::Array<double,2>& differences) const {
  int subspace_dim = clazz ? m_M_E : m_M_I;
  int input_dim = differences.extent(1);
  int data_count = differences.extent(0);
  blitz::Range a = blitz::Range::all();

  if (subspace_dim){
    // train the class using BIC

    // Compute PCA on the given dataset
    bob::learn::linear::PCATrainer trainer;
    const int n_eigs = trainer.output_size(differences);
    bob::learn::linear::Machine pca(input_dim, n_eigs);
    blitz::Array<double,1> variances(n_eigs);
    trainer.train(pca, variances, differences);

    // compute rho
    double rho = 0.;
    int non_zero_eigenvalues = std::min(input_dim, data_count-1);
    // make sure the number of kept eigenvalues is not chosen too large
    if (subspace_dim >= non_zero_eigenvalues)
      throw std::runtime_error((boost::format("The chosen subspace dimension %d must be smaller than the theoretical number of nonzero eigenvalues %d")%subspace_dim%non_zero_eigenvalues).str());
    // compute the average of the remaining eigenvalues
    for (int i = subspace_dim; i < non_zero_eigenvalues; ++i){
      rho += variances(i);
    }
    rho /= non_zero_eigenvalues - subspace_dim;

    // limit dimensionalities
    pca.resize(input_dim, subspace_dim);
    variances.resizeAndPreserve(subspace_dim);

    // check that all variances are meaningful
    for (int i = 0; i < subspace_dim; ++i){
      if (variances(i) < 1e-12)
        throw std::runtime_error((boost::format("The chosen subspace dimension is %d, but the %dth eigenvalue is already to small")%subspace_dim%i).str());
    }

    // initialize the machine
    blitz::Array<double, 2> projection = pca.getWeights();
    blitz::Array<double, 1> mean = pca.getInputSubtraction();
    machine.setBIC(clazz, mean, variances, projection, rho);
  } else {
    // train the class using IEC
    // => compute mean and variance only
    blitz::Array<double,1> mean(input_dim), variance(input_dim);

    // compute mean and variance
    mean = 0.;
    variance = 0.;
    for (int n = data_count; n--;){
      const blitz::Array<double,1>& diff = differences(n,a);
      assert(diff.shape()[0] == input_dim);
      for (int i = input_dim; i--;){
        mean(i) += diff(i);
        variance(i) += sqr(diff(i));
      }
    }
    // normalize mean and variances
    for (int i = 0; i < input_dim; ++i){
      // unbiased variance from the accumulated sums (mean(i) still holds the sum here)
      variance(i) = (variance(i) - sqr(mean(i)) / data_count) / (data_count - 1.);
      mean(i) /= data_count;
      if (variance(i) < 1e-12)
        throw std::runtime_error((boost::format("The variance of the %dth dimension is too small. Check your data!")%i).str());
    }

    // set the results to the machine
    machine.setIEC(clazz, mean, variance);
  }
}
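// Self-contained sketch of the running-sum computation used in the IEC branch
// above: accumulate the sum and the sum of squares per dimension, then apply the
// unbiased estimator  var = (sum_sq - sum*sum/n) / (n - 1).  meanAndVariance is a
// hypothetical helper for a single dimension, written for illustration.
#include <cstddef>
#include <vector>

static void meanAndVariance(const std::vector<double>& samples,
                            double& mean, double& variance)
{
  const std::size_t n = samples.size();
  double sum = 0.0, sumSq = 0.0;
  for (std::size_t i = 0; i < n; ++i) {
    sum += samples[i];
    sumSq += samples[i] * samples[i];
  }
  mean = sum / static_cast<double>(n);
  // requires n >= 2; matches the normalization done in train_single above
  variance = (sumSq - sum * sum / static_cast<double>(n)) / (static_cast<double>(n) - 1.0);
}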