Code example #1
0
File: classify.cpp  Project: fikgol/SDMP
// Classifies lines read from stdin. Option -w selects whether the model
// data is served from the database ("db") or fully preloaded in memory
// ("mem"); with no options the database-backed classifier is used.
int main(int argc, char *argv[])
{
     Classifier* clp = NULL;  // BUG FIX: was uninitialized
     string text;
     int ch;
     // NOTE(review): `if` processes at most one option; a getopt loop would
     // normally be `while` — confirm single-option behavior is intended.
     if((ch=getopt(argc,argv,"hw:"))!=-1){
	  switch(ch)
	  {
	  case 'h':
	       // Help text (Chinese): -w selects the data-loading mode; "db"
	       // keeps data in the database, "mem" loads it fully into memory.
	       cout<<"-h ----for help\n-w ----选择数据加载方式:db 表示存储在数据库,mem 表示初始完全加载于内存.默认db\n";
	       return 0;
	  case 'w':
	       // NOTE(review): `server(...)` is a temporary; assumes the
	       // Classifier it exposes outlives it — confirm ownership.
	       if(!strcmp(optarg,"db"))
		    clp=server("db","navi").clp;
	       else if(!strcmp(optarg,"mem"))
		    clp=server("mem","navi").clp;
	       else {
		    cout<<"Undefined: "<<optarg<<endl;
		    return 1;
	       }
	       while(cin>>text)
		    cout<<"类:"<<clp->classify(text)<<endl;
	       // BUG FIX: previously fell through to `default` and returned 1
	       // even after classifying successfully.
	       return 0;
	  default:
	       return 1;
	  }
     }
     // No options: default to the database-backed classifier.
     clp=server("db","navi").clp;
     while(cin>>text)
	  cout<<"类:"<<clp->classify(text)<<endl;

     return 0;
}
Code example #2
0
	/// Per-frame callback: previews the incoming camera image with a centered
	/// ROI rectangle; 'c' captures and classifies the frame, 'q' shuts down.
	void
	inputCallback(const sensor_msgs::Image::ConstPtr& color_image_msg, const sensor_msgs::PointCloud2::ConstPtr& pointcloud_msg)
	{
		// Convert the incoming ROS image message to a cv::Mat.
		cv_bridge::CvImageConstPtr color_image_ptr;
		cv::Mat color_image;
		convertColorImageMessageToMat(color_image_msg, color_image_ptr, color_image);

		// Convert the point cloud message to a PCL cloud.
		pcl::PointCloud<pcl::PointXYZRGB> point_cloud_src;
		pcl::fromROSMsg(*pointcloud_msg, point_cloud_src);

		// Draw a centered green square (capture ROI preview) on a copy.
		const int half_side_length = 50;
		cv::Mat preview = color_image.clone();
		const cv::Point top_left(color_image.cols/2 - half_side_length, color_image.rows/2 - half_side_length);
		const cv::Point bottom_right(color_image.cols/2 + half_side_length, color_image.rows/2 + half_side_length);
		cv::rectangle(preview, top_left, bottom_right, CV_RGB(0, 255, 0), 3);
		cv::imshow("color image", preview);

		const char key = cv::waitKey(20);
		if (key == 'c')
		{
			// Classify the captured frame (includes the drawn rectangle,
			// matching the original behavior).
			Classifier capture_classifier;
			capture_classifier.imageExtractROI(preview);
			capture_classifier.classify();
		}
		else if (key == 'q')
			ros::shutdown();
	}
Code example #3
0
File: classifierTest.cpp  Project: GabiThume/msc-src
/* Runs the classifier over feature vectors extracted from the image
   directory with every descriptor/quantization-method combination. */
int main(int argc, char const *argv[]){

  Classifier c;
  int numClasses, m, d, indexDescriptor, minoritySize;
  double prob = 0.5;
  std::string newDir, baseDir, featuresDir, csvOriginal;
  std::vector<ImageClass> originalData;

  /* BUG FIX: the usage text advertised five arguments while the program
     only accepts two (argc == 3 — argv[1] and argv[2] are read below). */
  if (argc != 3){
    std::cout << "\nUsage: ./classifierTest (0) (1)\n " << std::endl;
    std::cout << "\t(0) Directory to place tests\n" << std::endl;
    std::cout << "\t(1) Image Directory\n" << std::endl;
    std::cout << "\t./classifierTest Desbalanced/ Desbalanced/original/\n" << std::endl;
    exit(-1);
  }
  newDir = std::string(argv[1]);
  baseDir = std::string(argv[2]);

  /*  Available
  descriptorMethod: {"BIC", "GCH", "CCV", "Haralick6", "ACC", "LBP", "HOG", "Contour", "Fisher"}
  Quantization quantizationMethod: {"Intensity", "Luminance", "Gleam", "MSB"}
  */
  std::vector <int> descriptors {1, 2, 3, 4, 5, 6, 7, 8};

  for (indexDescriptor = 0; indexDescriptor < (int)descriptors.size(); indexDescriptor++){
    d = descriptors[indexDescriptor];
    /* NOTE(review): m runs 1..5 but the comment above lists only four
       quantization methods — quantizationMethod[m-1] may read out of
       bounds; confirm the array actually has five entries. */
    for (m = 1; m <= 5; m++){
      csvOriginal = newDir+"/analysis/original_"+descriptorMethod[d-1]+"_"+quantizationMethod[m-1]+"_";
      featuresDir = newDir+"/features/";

      /* Feature extraction from images */
      std::string originalDescriptor = desc(baseDir, featuresDir, d, m, "original");
      /* Read the feature vectors */
      originalData = ReadFeaturesFromFile(originalDescriptor);
      numClasses = originalData.size();
      if (numClasses != 0){
        std::cout << "---------------------------------------------------------------------------------------" << std::endl;
        std::cout << "Classification using original data" << std::endl;
        std::cout << "Features vectors file: " << originalDescriptor.c_str() << std::endl;
        std::cout << "---------------------------------------------------------------------------------------" << std::endl;
        c.findSmallerClass(originalData, &minoritySize);
        c.classify(prob, 1, originalData, csvOriginal.c_str(), minoritySize);
        originalData.clear();
      }
    }
  }
  return 0;
}
Code example #4
0
// Evaluates the classifier on every image listed in the database and
// reports the misclassification rate. The first letter of each file name
// encodes its ground truth: 'P' = pen, 'S' = scissors, 'O' = other.
int main () {

	DataBase db("Database.txt", FALSE);
	std::string name;
	int cl;
	float error = 0;

	const int total = db.getSize();
	for (int i = 0; i < total; i++){
		Classifier c;

		name = db.getElement(i);
		std::cout<<"Analizying image "<<name<<" ..."<<std::endl;
		cv::Mat image = cv::imread(name+".jpg");

		// ROBUSTNESS FIX: an unreadable/missing file used to be fed
		// straight into the classifier; count it as an error and skip.
		if (image.empty()){
			std::cout<<"Could not read "<<name<<".jpg"<<std::endl;
			error++;
			continue;
		}

		std::cout<<"Image read..."<<std::endl;

		// 'O' and 'P' images need a ROI extraction step first.
		if (name[0]=='O' || name[0]=='P'){
			std::cout<<"ROI Extraction..."<<std::endl;
			std::cout<<"Image size: "<<image.size()<<std::endl;
			c.imageExtractROI(image);
		}
		else
			c.setImage(image);

		std::cout<<"Image set..."<<std::endl;
		c.classify();
		cl = c.getClass();

		if(name[0]=='P' && cl!=PEN) error++;
		else if(name[0]=='S' && cl!=SCISSORS) error++;
		else if (name[0]=='O' && cl!=NO_IDENTIFIED) error++;
	}

	// BUG FIX: guard against division by zero on an empty database.
	if (total > 0)
		std::cout<<"Error= "<<error/total<<std::endl;
	else
		std::cout<<"Error= 0 (empty database)"<<std::endl;

	return 0;
}
Code example #5
0
void classifyAndMark(const Mat& src, Mat& marked, Classifier& c) {

    int inLaneStart = 389;
    
    marked = Mat(src.size(), src.type());
    Mat lab;
    if (getCvType() >= 0) {
        cvtColor(src, lab, getCvType());
    } else {
        lab = src;
    }
    uchar features[2];
    for(int row = 0; row < lab.rows; ++row) {
        //cout << "Row: " << row << endl;
        Point3_<uchar> *p = lab.ptr<Point3_<uchar> > (row);
        Point3_<uchar> *sp = marked.ptr<Point3_<uchar> >(row);
        //assumes CV_8UC3 LAB color image, with 3 values per pixel
        for(int col = 0; col < lab.cols; ++col, ++p, ++sp) {
            //throw away the L
//            cout << "Col: " << col << endl;

            getFeatures(p, features);
            const Instance inst = makeInstance(features, abs(col - inLaneStart), -1);
            int label = c.classify(inst);    
            if (row == 2) {
                //cout << "Col " << col << ": " << (int)p->x << ", " << (int)p->y << ", " << (int)p->z << ", " << label << endl;
            }
            sp->x = 0;
            //cout << countPositives << endl;
            if (label) {
                sp->y = 0xFF;
                sp->z = 0;
            } else {
                sp->y = 0;
                sp->z = 0;
            }
        }
    }   
}
Code example #6
0
File: Training.cpp  Project: Placc/MultimediaProjekt
	/**
	*	trains an svm model using the InriaPerson training dataset
	*	choses a number of negative detections randomly chosen from each image
	*	saves the freshly trained model to the harddisk
	*	@param string modelFile: path to the model file
	*	NOTE(review): modelFile is never used in the body — the paths actually
	*	written are MODEL_STANDARD_FILE and MODEL_HARD_EX_FILE; confirm intent.
	*/
	void trainSVM(string modelFile){
		vector<string> pos_examples;
		vector<string> neg_examples;
		Classifier classifier = Classifier();
		// Seeded from wall-clock time: each run samples different negatives.
		RNG random_index = RNG( time (NULL) );

		// Fixed 64x128 crop (Point(16,16)..Point(80,144)) taken out of each
		// normalized positive image before HOG extraction.
		Rect roi(Point(16,16), Point(80, 144));
		Preprocessing::loadPathsByDirectory(TRAIN_POS_NORMALIZED, pos_examples);
		Preprocessing::loadPathsByDirectory(TRAIN_NEG_ORIGINAL, neg_examples);
		// Pass 1a: every positive crop becomes a positive training example.
		for(vector<string>::iterator posIt = pos_examples.begin(); posIt != pos_examples.end(); ++posIt)
		{
			OriginalImage img( *posIt );
			( img ).image = ( img ).image(roi);
			FeatureExtraction::extractHOGFeatures( img );
			classifier.addPositiveExample(img.hog_features);
		}
		// Pass 1b: sample MAXRANDOMELEMENTS random windows per negative image,
		// spread across the original image and its pyramid levels.
		for(vector<string>::iterator negIt = neg_examples.begin(); negIt != neg_examples.end(); ++negIt)
		{

			OriginalImage img( *negIt );
			FeatureExtraction::computeHOGPyramid( img );
			FeatureExtraction::computeSlidingWindows( img );

			// Sliding windows are computed separately for every pyramid level.
			for(std::vector<Image>::iterator scaleIt = ( img ).lower_images.begin(); scaleIt != ( img ).lower_images.end(); ++scaleIt)
			{
				FeatureExtraction::computeSlidingWindows(*scaleIt);
				
			}

			int count = 0;
			// Per-image budget: MAXRANDOMELEMENTS split evenly across the
			// original image plus each pyramid level (hence size()+1).
			double howMuch = ( (double)( MAXRANDOMELEMENTS ) / (double)( img.lower_images.size() + 1 ) );
			for( int i = 0; i < (int)( howMuch ); i++ )
			{
				classifier.addNegativeExample( img.slidingWindows[ random_index.uniform( 0, img.slidingWindows.size() ) ].hog_features );
				count++;
			}
			// Carry the fractional remainder of the budget into the next level
			// so rounding does not lose samples overall.
			howMuch = howMuch + ( (double)( MAXRANDOMELEMENTS ) / (double)( img.lower_images.size() + 1) ) - (int)howMuch;
			if( howMuch < MAXRANDOMELEMENTS )
				for( int current = 0; current < img.lower_images.size(); current++ )
				{
					for( int i = 0; i < (int)( howMuch ); i++ )
					{
						int y = random_index.uniform( 0, img.lower_images[ current ].slidingWindows.size() );
						Mat hog_features = img.lower_images[ current ].slidingWindows[ y ].hog_features;
						classifier.addNegativeExample( hog_features );
						count++;
					}
					howMuch = howMuch + ( (double)( MAXRANDOMELEMENTS ) / (double)( img.lower_images.size() + 1) ) - (int)howMuch;
				}
				// NOTE(review): despite its indentation this while belongs to
				// the enclosing for over neg_examples, NOT to the if above; it
				// tops the sample count up to exactly MAXRANDOMELEMENTS.
				while( count < MAXRANDOMELEMENTS )
				{
					int x = random_index.uniform( 0, img.lower_images.size() );
					int y = random_index.uniform( 0, img.lower_images[ x ].slidingWindows.size() );
					Mat hog_features = img.lower_images[ x ].slidingWindows[ y ].hog_features;
					classifier.addNegativeExample( hog_features );
					count++;
				}
				
		}
		// Train the first-round model and reload it for hard-negative mining.
		classifier.train( MODEL_STANDARD_FILE );

		classifier.loadModel( MODEL_STANDARD_FILE );

		// Pass 2: hard-negative mining — every window the first-round model
		// still scores as positive (> 0.0) is added as a negative example.
		for(vector<string>::iterator negIt = neg_examples.begin(); negIt != neg_examples.end(); ++negIt)
		{

			OriginalImage img( *negIt );
			FeatureExtraction::computeHOGPyramid( img );
			FeatureExtraction::computeSlidingWindows( img );

			
			for( vector<SlidingWindow>::iterator it= img.slidingWindows.begin(); it != img.slidingWindows.end(); ++it )
			{
				if( classifier.classify( ( *it ).hog_features ) > 0.0 )
				{
					classifier.addNegativeExample( (*it ).hog_features );
				}

			}

			// Repeat the mining on every pyramid level.
			for(std::vector<Image>::iterator scaleIt = ( img ).lower_images.begin(); scaleIt != ( img ).lower_images.end(); ++scaleIt)
			{
				FeatureExtraction::computeSlidingWindows(*scaleIt);

				for( vector<SlidingWindow>::iterator it= ( *scaleIt ).slidingWindows.begin(); it != ( *scaleIt ).slidingWindows.end(); ++it )
				{
					if( classifier.classify( ( *it ).hog_features ) > 0.0 )
					{
						classifier.addNegativeExample( (*it ).hog_features );
					}

				}
				
			}



		}

		// Retrain on the augmented (hard-example) set and save the final model.
		classifier.train( MODEL_HARD_EX_FILE );
	}
Code example #7
0
File: main.cpp  Project: leftfelt/cpp
void main(){
	int pattern_num = 100;
	ClassifierSetting setting;
	setting.classification_num = 10; //分類回数
	setting.permissible_error = 0.01;
	setting.cluster_size = 10; //分類数
	setting.data_max = pattern_num + pattern_num;
	setting.data_min = 0;
	setting.dimension_num = 2;

	Classifier *classifier = new KMeansClassifier(setting);
	std::vector<Pattern> pattern_list;
	pattern_list.resize(pattern_num);
	int count = 0;
	
	std::generate(pattern_list.begin(), pattern_list.end(), [&]()->Pattern{
		Pattern pattern(setting.dimension_num);
		for(int i = 0 ; i < (signed)pattern.size() ; i++){
			pattern[i] = count + count;
		}
		count++;
		return pattern;
	});

	Dictionary dictionary = classifier->classify(pattern_list);

	std::for_each(dictionary.begin(), dictionary.end(),[](Pattern pattern){
		std::cout << "[ ";
		for(int i = 0 ; i < pattern.size() ; i++){
			std::cout << pattern[i] << " ";
		}
		std::cout << "]" << std::endl;
	});
	
	std::cout << "==================================================" << std::endl;

	Pattern pattern(setting.dimension_num);
	pattern_list.clear();
	pattern[0] = pattern_num * 3;
	pattern[1] = pattern_num * 3;
	pattern_list.push_back(pattern);
	pattern[0] = pattern_num * 4;
	pattern[1] = pattern_num * 4;
	pattern_list.push_back(pattern);
	pattern[0] = pattern_num * 5;
	pattern[1] = pattern_num * 5;
	pattern_list.push_back(pattern);
	
	dictionary = classifier->classify(pattern_list, dictionary);

	std::for_each(dictionary.begin(), dictionary.end(),[](Pattern pattern){
		std::cout << "[ ";
		for(int i = 0 ; i < pattern.size() ; i++){
			std::cout << pattern[i] << " ";
		}
		std::cout << "]" << std::endl;
	});
	

	getchar();
}
Code example #8
0
// Tracks the object starting at initBB through the chosen image source,
// retraining the online classifier every frame and printing confidence
// diagnostics. Stops on any key press, end of stream, or lost target.
// NOTE(review): numBaseClassifier and resultDir are unused — kept for
// interface compatibility; confirm whether they were meant to be wired in.
void track3(ImageSource::InputDevice input, int numBaseClassifier, float overlap, float searchFactor, char* resultDir, Rect initBB, char* source = NULL)
{
	unsigned char *curFrame = NULL;
	int key;

	// Choose the image source.
	ImageSource *imageSequenceSource;
	switch (input)
	{
	case ImageSource::AVI:
		imageSequenceSource = new ImageSourceAVIFile(source);
		break;
	case ImageSource::DIRECTORY:
		imageSequenceSource = new ImageSourceDir(source);
		break;
	case ImageSource::USB:
		imageSequenceSource = new ImageSourceUSBCam();
		break;
	default:
		return;
	}

	ImageHandler* imageSequence = new ImageHandler (imageSequenceSource);
	imageSequence->getImage();
	imageSequence->viewImage ("Tracking...", false);

	trackingRect = initBB;

	curFrame = imageSequence->getGrayImage ();
	ImageRepresentation* curFrameRep = new ImageRepresentation(curFrame, imageSequence->getImageSize());
	Rect wholeImage;
	wholeImage = imageSequence->getImageSize();

	// Skip the beginning of the video (60 frames).
	for (int t = 0; t < 60; t++, imageSequence->getImage());

	IplImage *image;
	image = imageSequence->getIplImage();

	printf ("init tracker...");
	Classifier* tracker;
	tracker = new Classifier(image, trackingRect);
	printf (" done.\n");

	Size trackingRectSize;
	trackingRectSize = trackingRect;
	printf ("start tracking (stop by pressing any key)...\n\n");

	// Initialize the detector from the tracker's classifier.
	Detector* detector;
	detector = new Detector(tracker->getClassifier());
	Rect trackedPatch = trackingRect;
	Rect validROI;
	validROI.upper = validROI.left = 0;
	validROI.height = image->height;
	validROI.width = image->width;

	key = (char)-1;
	while (key == (char)-1)
	{
		imageSequence->getImage();
		image = imageSequence->getIplImage();

		curFrame = imageSequence->getGrayImage ();
		if (curFrame == NULL) break;

		// Calculate the patches within the search region.
		Patches *trackingPatches;
		Rect searchRegion;
		searchRegion = getTrackingROI(searchFactor, trackedPatch, validROI);
		trackingPatches = new PatchesRegularScan(searchRegion, wholeImage, trackingRectSize, overlap);

		curFrameRep->setNewImageAndROI(curFrame, searchRegion);

		detector->classifySmooth(curFrameRep, trackingPatches);

		trackedPatch = trackingPatches->getRect(detector->getPatchIdxOfBestDetection());

		if (detector->getNumDetections() <= 0)
		{
			printf("Lost...\n");
			delete trackingPatches;  // BUG FIX: was leaked on the lost path
			break;
		}

		// Retrain the classifier on the freshly tracked patch.
		tracker->train(image, trackedPatch);

		float alpha, confidence, eval;
		alpha = tracker->getSumAlphaClassifier();
		confidence = detector->getConfidenceOfBestDetection() / alpha;
		eval = tracker->classify(image, trackedPatch);

		printf("alpha: %5.3f confidence: %5.3f evalOficial: %5.3f ", alpha, confidence, eval);

		// Probe the classifier response at ten small vertical offsets
		// around the tracked patch (-5..+4 rows) for diagnostics.
		int orig = trackedPatch.upper;
		trackedPatch.upper -= 5;
		for (int i = 0; i < 10; i++)
		{
			eval = tracker->classify(image, trackedPatch);
			printf("%5.3f ", eval);
			trackedPatch.upper += 1;
			imageSequence->paintRectangle (trackedPatch, Color (0,255,0), 1);
		}
		trackedPatch.upper = orig;
		printf("\n");

		// Display results.
		imageSequence->paintRectangle (trackedPatch, Color (255,0,0), 5);
		imageSequence->viewImage ("Tracking...", false);

		delete trackingPatches;  // BUG FIX: was leaked every frame
		key = cvWaitKey(200);
	}

	// Clean up.
	delete tracker;
	delete detector;  // BUG FIX: detector was never freed
	delete imageSequenceSource;
	delete imageSequence;
	// BUG FIX: the original tested `curFrame == NULL` here, so the last
	// grabbed frame was never freed (deleting NULL is a no-op).
	// NOTE(review): assumes getGrayImage() transfers ownership of the
	// buffer — the original cleanup implies this; confirm in ImageHandler.
	if (curFrame != NULL)
		delete[] curFrame;
	delete curFrameRep;
}