/////////////////////////////////////////////////////////////// // Validate the Command line arguments. bool CmdLineArgs::ValidateAndStoreArgs(int argc,char* argv[]) { try { if ( argc < 4 ) { return false; } std::string recurseOption = argv[1]; if ( recurseOption == "-r" ) { recurse = true; } else if ( recurseOption == "-nr") { recurse = false;} else { return false; } pathSpec = argv[2]; filePattern.clear(); for(int i = 3; i < argc;i++) { filePattern.push_back(argv[i]); } fetchFiles(); return true; } catch (std::exception ex) { std::cout<<std::endl<<"Error Processing Arguments : "<<ex.what(); return false; } }
// Construct a workbench package.
//
//   nodelist     Space/comma separated node names, or null to select
//                all nodes. NOTE(review): lower-cased in place.
//   distribute   Forwarded to fetchFiles() when a full build runs.
//   config_only  When true, stop after reading the configuration.
wb_pkg::wb_pkg(char* nodelist, bool distribute, bool config_only)
{
    if (nodelist) {
        // Fixed-size slots for the parsed node names: 32 names, 20 chars each.
        char names[32][20];

        // Node matching is case-insensitive, so normalize in place first.
        cdh_ToLower(nodelist, nodelist);

        const int node_count = dcli_parse(nodelist, " ,", "", (char*)names,
                                          sizeof(names) / sizeof(names[0]),
                                          sizeof(names[0]), 0);

        m_allnodes = false;
        for (int idx = 0; idx < node_count; idx++) {
            pkg_node node(names[idx]);
            m_nodelist.push_back(node);
        }
    } else {
        m_allnodes = true;
    }

    readConfig();

    // Configuration-only mode: skip the (potentially expensive) fetch.
    if (config_only)
        return;

    fetchFiles(distribute);
}
int main(int argc, char** argv) { if (argc != 3) { help(); return 0; } // ------------ TRAINING PHASE { // ----- LOAD IMAGES FROM PATH QString path(argv[1]); QStringList filesToOpen = fetchFiles(path); if (filesToOpen.empty()) { help(); return 1; } // Create feature detector and descriptor extractor: cv::Ptr<cv::ORB> detector = cv::ORB::create(150); cv::Mat trainingDescriptors(0, detector->descriptorSize(), detector->descriptorType()); std::cout << "Training descriptor size & type : " << detector->descriptorSize() << ", " << detector->descriptorType() << std::endl; std::vector<cv::KeyPoint> keypoints; // Loop on files : std::cout << "Extract features from training images" << std::endl; foreach (QString file, filesToOpen) { SD_TRACE1("Open file '%1'", file); QString f = path + "/" + file; cv::Mat inImage = cv::imread(f.toStdString(), cv::IMREAD_GRAYSCALE); cv::Mat descriptors; detector->detectAndCompute(inImage, cv::Mat(), keypoints, descriptors); trainingDescriptors.push_back(descriptors); cv::Mat imageWithKeyPoints; cv::drawKeypoints(inImage, keypoints, imageWithKeyPoints); // ImageCommon::displayMat(imageWithKeyPoints, true, "Input image with keypoints"); keypoints.clear(); } std::cout << "Total number of descriptors : " << trainingDescriptors.rows << std::endl; trainingDescriptors.convertTo(trainingDescriptors, CV_32F); ImageCommon::printMat(trainingDescriptors, "trainingDescriptors"); cv::BOWKMeansTrainer bowTrainer(15); bowTrainer.add(trainingDescriptors); cv::Mat vocabulary = bowTrainer.cluster(); //ImageCommon::displayMat(vocabulary, true, "Vocabulary"); // Train }