Example #1
    DictionaryTrajectory::DictionaryTrajectory(std::string baseFolder, double az, double bz) : Trajectory() {

        this->baseFolder = baseFolder;
        vector<string> files = getFilesInDirectory(baseFolder);

        queryFiles = sortPrefix(files, "query");
        trajFiles = sortPrefix(files, "traj");
        dmpFiles = sortPrefix(files, "dmp");

        if(dmpFiles.size() == 0) {

            // learn dmps
            queryPoints = mapFiles(queryFiles, trajFiles, "query", "traj");
            KUKADU_SHARED_PTR<JointDMPLearner> dmpLearner;

            vector<mat> jointsVec;
            double tMax = 0.0;
            for(size_t i = 0; i < queryPoints.size(); ++i) {

                mat joints = readMovements((string(baseFolder) + string(queryPoints.at(i).getFileDataPath())).c_str());
                degOfFreedom = joints.n_cols - 1;

                queryPoints.at(i).setQueryPoint(readQuery(string(baseFolder) + string(queryPoints.at(i).getFileQueryPath())));
                jointsVec.push_back(joints);
                double currentTMax = joints(joints.n_rows - 1, 0);

                if(tMax < currentTMax)
                    tMax = currentTMax;

            }

            for(size_t i = 0; i < jointsVec.size(); ++i) {

                QueryPoint currentQueryPoint = queryPoints.at(i);
                mat joints = jointsVec.at(i);
                joints = fillTrajectoryMatrix(joints, tMax);
                dmpLearner = KUKADU_SHARED_PTR<JointDMPLearner>(new JointDMPLearner(az, bz, joints));

                KUKADU_SHARED_PTR<Dmp> learnedDmps = dmpLearner->fitTrajectories();
                learnedDmps->serialize(baseFolder + currentQueryPoint.getFileDmpPath());
                queryPoints.at(i).setDmp(learnedDmps);
                startingPos = queryPoints.at(i).getDmp()->getY0();

                cout << "(DMPGeneralizer) goals for query point [" << currentQueryPoint.getQueryPoint().t() << "]" << endl << "\t [";
                cout << currentQueryPoint.getDmp()->getG().t() << "]" << endl;

                // release the learner before the next iteration
                dmpLearner.reset();

            }

        } else {

            queryPoints = mapFiles(queryFiles, trajFiles, dmpFiles, "query", "traj", "dmp");

        }

        degOfFreedom = queryPoints.at(0).getDmp()->getDegreesOfFreedom();

    }
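For orientation, a hedged call sketch against the constructor above; the include path, the folder name and the az/bz gains are illustrative assumptions rather than anything prescribed by kukadu:

#include <kukadu/kukadu.hpp>   // assumption: adjust to the actual kukadu header and namespace

int main() {
    // the folder is expected to hold the query*/traj* files; the dmp* files are
    // (re)generated on the first run, as the constructor above shows
    double az = 48.0;
    double bz = az / 4.0;      // bz = az / 4 gives the usual critically damped DMP
    DictionaryTrajectory dict("./movements/", az, bz);
    return 0;
}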
Example #2
void getFilesInDirectory( const QString &p, std::vector< QString > &files, bool recursive, const std::vector< QString > &filter )
{
    QString path = p;
    // strip a single trailing slash or backslash, if present
    if (path.endsWith("/") || path.endsWith("\\"))
        path.chop(1);

    QDir d(path);

    d.setFilter( QDir::Dirs | QDir::Hidden | QDir::NoSymLinks );

    std::vector< QString > subDir;

    const QFileInfoList &listDirectories = d.entryInfoList();
    QStringList filters;
    for (unsigned int i=0; i<filter.size(); ++i)
        filters << filter[i];

    // the name filters only affect the file listing below; the directory
    // listing above was taken without them
    d.setNameFilters(filters);
    for (int j = 0; j < listDirectories.size(); ++j)
    {
        QFileInfo fileInfo=listDirectories.at(j);
        subDir.push_back(fileInfo.fileName());

    }

    d.setFilter( QDir::Files | QDir::Hidden | QDir::NoSymLinks );

    const QFileInfoList &listFiles = d.entryInfoList();
    for (int j = 0; j < listFiles.size(); ++j)
    {
        QFileInfo fileInfo=listFiles.at(j);

        files.push_back(path+QString("/")+fileInfo.fileName());

    }

    if (recursive)
    {
        for (unsigned int i=0; i<subDir.size(); ++i)
        {
            if (subDir[i].left(1) == QString(".")) continue;
            if (subDir[i] == QString("OBJ"))       continue;

            QString nextDir=path+QString("/")+subDir[i];
            getFilesInDirectory(nextDir, files, recursive, filter);
        }
    }
}
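A minimal caller sketch for the helper above; the "src" directory and the wildcard patterns are placeholders chosen for illustration:

#include <QDebug>
#include <QString>
#include <vector>

void getFilesInDirectory(const QString &p, std::vector<QString> &files,
                         bool recursive, const std::vector<QString> &filter);

int main()
{
    std::vector<QString> files;
    std::vector<QString> filter;
    filter.push_back("*.cpp");   // Qt wildcard name filters, applied to files only
    filter.push_back("*.h");

    // collect matching files under "src", descending into subdirectories
    getFilesInDirectory(QString("src"), files, true, filter);

    for (size_t i = 0; i < files.size(); ++i)
        qDebug() << files[i];
    return 0;
}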
Example #3
	void cPluginManager::autoLoadPlugins()
	{
		cAudioVector<cAudioString>::Type fileList = getFilesInDirectory(".");
		for(size_t i=0; i<fileList.size(); ++i)
		{
			if(fileList[i].substr(0, 4) == "cAp_")
			{
#ifdef CAUDIO_PLATFORM_WIN
				if(fileList[i].substr(fileList[i].length()-4, 4) == ".dll")
#elif defined(CAUDIO_PLATFORM_LINUX)
				if(fileList[i].substr(fileList[i].length()-3, 3) == ".so")
#elif defined(CAUDIO_PLATFORM_MAC)
				if(fileList[i].substr(fileList[i].length()-6, 6) == ".dylib")
#endif
				{
					//Found a plugin, load it
					installPlugin(cAudioString("./" + fileList[i]).c_str(), NULL);
				}
			}
		}
	}
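The same prefix/extension test, sketched as a standalone helper without the platform #ifdefs; the helper names are hypothetical and not part of cAudio:

#include <string>

static bool endsWith(const std::string &s, const std::string &suffix)
{
    return s.size() >= suffix.size() &&
           s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
}

// a file is a plugin candidate if it carries the cAp_ prefix and a
// shared-library extension for any of the supported platforms
static bool looksLikePlugin(const std::string &name)
{
    return name.compare(0, 4, "cAp_") == 0 &&
           (endsWith(name, ".dll") || endsWith(name, ".so") || endsWith(name, ".dylib"));
}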
Example #4
void pcl::face_detection::FaceDetectorDataProvider<FeatureType, DataSet, LabelType, ExampleIndex, NodeType>::initialize(std::string & data_dir)
{
  std::string start;
  std::string ext = std::string ("pcd");
  bf::path dir = data_dir;

  std::vector < std::string > files;
  getFilesInDirectory (dir, start, files, ext);

  //apart from loading the file names, we will do some binning with regard to pitch and yaw
  std::vector < std::vector<int> > yaw_pitch_bins;
  std::vector < std::vector<std::vector<std::string> > > image_files_per_bin;

  float res_yaw = 15.f;
  float res_pitch = res_yaw;
  int min_yaw = -75;
  int min_pitch = -60;

  int num_yaw = static_cast<int>((std::abs (min_yaw) * 2) / static_cast<int>(res_yaw + 1.f));
  int num_pitch = static_cast<int>((std::abs (min_pitch) * 2) / static_cast<int>(res_pitch + 1.f));

  yaw_pitch_bins.resize (num_yaw);
  image_files_per_bin.resize (num_yaw);
  for (int i = 0; i < num_yaw; i++)
  {
    yaw_pitch_bins[i].resize (num_pitch);
    image_files_per_bin[i].resize (num_pitch);
    for (int j = 0; j < num_pitch; j++)
    {
      yaw_pitch_bins[i][j] = 0;
    }
  }

  for (size_t i = 0; i < files.size (); i++)
  {
    std::stringstream filestream;
    filestream << data_dir << "/" << files[i];
    std::string file = filestream.str ();

    std::string pose_file (files[i]);
    boost::replace_all (pose_file, ".pcd", "_pose.txt");

    Eigen::Matrix4f pose_mat;
    pose_mat.setIdentity (4, 4);

    std::stringstream filestream_pose;
    filestream_pose << data_dir << "/" << pose_file;
    pose_file = filestream_pose.str ();

    bool result = readMatrixFromFile (pose_file, pose_mat);
    if (result)
    {
      Eigen::Vector3f ea = pose_mat.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
      ea *= 57.2957795f; //transform it to degrees to do the binning
      int y = static_cast<int>(pcl_round ((ea[0] + static_cast<float>(std::abs (min_yaw))) / res_yaw));
      int p = static_cast<int>(pcl_round ((ea[1] + static_cast<float>(std::abs (min_pitch))) / res_pitch));

      if (y < 0)
        y = 0;
      if (p < 0)
        p = 0;
      if (p >= num_pitch)
        p = num_pitch - 1;
      if (y >= num_yaw)
        y = num_yaw - 1;

      assert (y >= 0 && y < num_yaw);
      assert (p >= 0 && p < num_pitch);

      yaw_pitch_bins[y][p]++;

      image_files_per_bin[y][p].push_back (file);
    }
  }

  pcl::face_detection::showBining (num_pitch, res_pitch, min_pitch, num_yaw, res_yaw, min_yaw, yaw_pitch_bins);

  int max_elems = 0;
  int total_elems = 0;

  for (int i = 0; i < num_yaw; i++)
  {
    for (int j = 0; j < num_pitch; j++)
    {
      total_elems += yaw_pitch_bins[i][j];
      if (yaw_pitch_bins[i][j] > max_elems)
        max_elems = yaw_pitch_bins[i][j];
    }
  }

  // average over the num_yaw * num_pitch bins
  float average = static_cast<float> (total_elems) / static_cast<float> (num_yaw * num_pitch);
  std::cout << "The average number of images per bin is: " << average << std::endl;

  std::cout << "Total number of images in the dataset: " << total_elems << std::endl;
  //reduce dataset imbalance by capping each bin at min_images_per_bin_ images
  if (min_images_per_bin_ != -1)
  {
    std::cout << "Reducing imbalance of the dataset." << std::endl;
    for (int i = 0; i < num_yaw; i++)
    {
      for (int j = 0; j < num_pitch; j++)
      {
        if (yaw_pitch_bins[i][j] >= min_images_per_bin_)
        {
          std::random_shuffle (image_files_per_bin[i][j].begin (), image_files_per_bin[i][j].end ());
          image_files_per_bin[i][j].resize (min_images_per_bin_);
          yaw_pitch_bins[i][j] = min_images_per_bin_;
        }

        for (size_t ii = 0; ii < image_files_per_bin[i][j].size (); ii++)
        {
          image_files_.push_back (image_files_per_bin[i][j][ii]);
        }
      }
    }
  }

  pcl::face_detection::showBining (num_pitch, res_pitch, min_pitch, num_yaw, res_yaw, min_yaw, yaw_pitch_bins);
  std::cout << "Total number of images in the dataset:" << image_files_.size () << std::endl;
}
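As a side note on the binning arithmetic above, a small standalone sketch with made-up angle values; the clamping mirrors the bounds checks in the example:

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <iostream>

// map an angle in degrees onto a bin index, clamped to [0, num_bins - 1]
int binIndex(float angle_deg, int min_angle, float res, int num_bins)
{
    int idx = static_cast<int>(std::floor((angle_deg + std::abs(min_angle)) / res + 0.5f));
    return std::max(0, std::min(idx, num_bins - 1));
}

int main()
{
    const float res_yaw = 15.f;
    const int   min_yaw = -75;
    // same bin-count formula as in the example: 150 / 16 == 9 yaw bins
    const int   num_yaw = (std::abs(min_yaw) * 2) / static_cast<int>(res_yaw + 1.f);
    std::cout << binIndex(30.f, min_yaw, res_yaw, num_yaw) << std::endl;  // prints 7
    return 0;
}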
Example #5
int annTrain::train(std::string imagesDir, int networkInputSize, float testRatio)
{

    std::cout << "Reading training set..." << std::endl;
    uint64 start = ofGetElapsedTimeMillis();
    std::vector<std::string> files = getFilesInDirectory(imagesDir);
    std::random_shuffle(files.begin(), files.end());
    
    cv::Mat img;
    
    for (auto it = files.begin(); it != files.end(); ++it)
    {
        std::string filename = *it;
        //std::cout << "Reading image " << filename << "..." << std::endl;
        img = cv::imread(filename, 0);

        if (img.empty())
        {
            std::cerr << "WARNING: Could not read image." << std::endl;
            continue;
        }
        std::string classname = getClassName(filename);
        cv::Mat descriptors = getDescriptors(img);
        processClassAndDesc(classname, descriptors);
    }
    
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    std::cout << "Creating vocabulary..." << std::endl;
    start = ofGetElapsedTimeMillis();
    cv::Mat labels;
    cv::Mat vocabulary;
    // Use k-means to find k centroids (the words of our vocabulary)
    cv::kmeans(descriptorsSet, networkInputSize, labels, cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 10, 0.01), 1, cv::KMEANS_PP_CENTERS, vocabulary);
    // No need to keep it on memory anymore
    descriptorsSet.release();
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // Convert the set of local features of each image into a single descriptor
    // using the bag-of-words technique
    std::cout << "Getting histograms of visual words..." << std::endl;
    int* ptrLabels = (int*)(labels.data);
    int size = labels.rows * labels.cols;
    for (int i = 0; i < size; i++)
    {
        int label = *ptrLabels++;
        ImageData* data = descriptorsMetadata[i];
        data->bowFeatures.at<float>(label)++;
    }
    
    // Fill the matrices to be used by the neural network
    std::cout << "Preparing neural network..." << std::endl;
    std::set<ImageData*> uniqueMetadata(descriptorsMetadata.begin(), descriptorsMetadata.end());
    for (auto it = uniqueMetadata.begin(); it != uniqueMetadata.end(); ++it)
    {
        ImageData* data = *it;
        cv::Mat normalizedHist;
        cv::normalize(data->bowFeatures, normalizedHist, 0, data->bowFeatures.rows, cv::NORM_MINMAX, -1, cv::Mat());
        trainSamples.push_back(normalizedHist);
        trainResponses.push_back(getClassCode(classes, data->classname));
        delete data; // release the per-image metadata
    }
    descriptorsMetadata.clear();
    
    // Training neural network
    std::cout << "Training neural network..." << std::endl;
    start = ofGetElapsedTimeMillis();
    mlp = getTrainedNeuralNetwork(trainSamples, trainResponses);
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // We can clear memory now
    trainSamples.release();
    trainResponses.release();
    
    // Train FLANN
    std::cout << "Training FLANN..." << std::endl;
    start = ofGetElapsedTimeMillis();
    
    flann = cv::Ptr<cv::FlannBasedMatcher>(new cv::FlannBasedMatcher());
    
    flann->add(vocabulary);
    flann->train();
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // Reading test set
    std::cout << "Reading test set..." << std::endl;
    start = ofGetElapsedTimeMillis();
    readImagesToTest(files.begin() + (size_t)(files.size() * testRatio), files.end());
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // Get confusion matrix of the test set
    std::vector<std::vector<int> > confusionMatrix = getConfusionMatrix();
    
    // how accurate is our model
    std::cout << "Confusion matrix " << std::endl;
    printConfusionMatrix(confusionMatrix, classes);
    std::cout << "Accuracy " << getAccuracy(confusionMatrix) << std::endl;
    
    // now save everything
    std::cout << "saving models" << std::endl;
    saveModels(vocabulary, classes);
    
    return 0;
}
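A hypothetical driver for the method above; that annTrain is default-constructible, the header name, the image directory, the 512-word vocabulary size and the 0.25 test ratio are all assumptions for illustration:

#include "annTrain.h"   // assumption: project header declaring annTrain

int main()
{
    annTrain trainer;   // assumption: default constructor
    // trains the BoW vocabulary, the MLP and the FLANN matcher, then prints
    // the confusion matrix and accuracy for the held-out portion of the files
    return trainer.train("images/", 512, 0.25f);
}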