Code Example #1
TEST(NaiveBayesTest, test2) {
  enum Bird {
    SMALL,
    MIDDLE,
    BIG
  };
  TrainingSet<Bird, 2> trainingSet;
  
  // weight, height
  std::array<double, 2> s1 = {2, 10};
  std::array<double, 2> s2 = {2.3, 12};
  trainingSet.add(SMALL, s1);
  trainingSet.add(SMALL, s2);
  
  std::array<double, 2> m1 = {4, 15};
  std::array<double, 2> m2 = {4.7, 17.2};
  trainingSet.add(MIDDLE, m1);
  trainingSet.add(MIDDLE, m2);
  
  std::array<double, 2> b1 = {7, 23};
  std::array<double, 2> b2 = {8.5, 22.5};
  trainingSet.add(BIG, b1);
  trainingSet.add(BIG, b2);
  
  NaiveBayesClasifier<Bird, 2> clasifier;
  EXPECT_TRUE(clasifier.train(trainingSet));
  
  std::array<double, 2> sample1 = {1.5, 9};
  std::array<double, 2> sample2 = {4.9, 16};
  std::array<double, 2> sample3 = {9, 20};
  
  EXPECT_EQ(clasifier.clasify(sample1), SMALL);
  EXPECT_EQ(clasifier.clasify(sample2), MIDDLE);
  EXPECT_EQ(clasifier.clasify(sample3), BIG);
}
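The two NaiveBayesTest cases in this listing (this one and Code Example #4) exercise only add, train and clasify. As a rough, non-authoritative orientation, a minimal sketch of the interface those calls assume could look like the declarations below; the container layout and the per-class Gaussian model are illustrative assumptions, not taken from the project.

// Hypothetical interface sketch for the classes used in the NaiveBayes tests above.
// Only add/train/clasify are known from the tests; the storage and the Gaussian
// per-feature model are assumptions made for illustration.
#include <array>
#include <cstddef>
#include <map>
#include <vector>

template <typename Type, std::size_t Dim>
class TrainingSet {
public:
  void add(Type type, const std::array<double, Dim> &features) {
    samples_[type].push_back(features);
  }
private:
  std::map<Type, std::vector<std::array<double, Dim>>> samples_;
};

template <typename Type, std::size_t Dim>
class NaiveBayesClasifier {
public:
  bool train(const TrainingSet<Type, Dim> &set);             // estimate per-class feature mean/variance
  Type clasify(const std::array<double, Dim> &sample) const; // return the most probable class
};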
Code Example #2
File: trainingset.cpp Project: marlncpe/INSYDE
bool TrainingSet::operator!=(const TrainingSet &trset)
{
	if(trset.getInputs() == inputs && trset.getTargets() == targets){
		return false;
	}
	return true;
}
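Since the comparison already yields a boolean, the body above is equivalent to the single statement return !(trset.getInputs() == inputs && trset.getTargets() == targets);, the conventional way of defining operator!= in terms of the corresponding equality test.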
Code Example #3
File: ClassificationTree.cpp Project: mrthat/cvpr
void	ClassificationTree::print_train_log(const TreeNode::PtrSplitNodeBase split, const TrainingSet &train_set) const
{
	MatType				ltype	=	train_set.get_label_type();
	MatType				ftype	=	train_set.get_feature_type();
	int					rows	=	(int)ltype.total();
	cv::Mat_<double>	left_tmp;
	cv::Mat_<double>	right_tmp;
	TrainingSet			left_set(ftype, ltype);
	TrainingSet			right_set(ftype, ltype);

	split->operator()(train_set, left_set, right_set);

	
	left_set.compute_target_mean(left_tmp);
	right_set.compute_target_mean(right_tmp);

	cv::Mat_<double>	left_dist(rows, 1, (double*)left_tmp.data);
	cv::Mat_<double>	right_dist(rows, 1, (double*)right_tmp.data);


	printf("left dist\n");
	for (unsigned ii = 0; ii < left_dist.total(); ++ii) {
		printf("\tlabel%d:%f\n", ii, left_dist.at<double>(ii) / left_set.size());
	}

	printf("right dist\n");
	for (unsigned ii = 0; ii < right_dist.total(); ++ii) {
		printf("\tlabel%d:%f\n", ii, right_dist.at<double>(ii) / right_set.size());
	}

}
Code Example #4
TEST(NaiveBayesTest, test1) {
  enum Type {
    MALE,
    FEMALE 
  };
  TrainingSet<Type, 3> trainingSet;
  
  // feature contains height, weight and foot size
  std::array<double, 3> m1 = {6, 180, 12};
  std::array<double, 3> m2 = {5.92, 190, 11};
  std::array<double, 3> m3 = {5.58, 170, 12};
  std::array<double, 3> m4 = {5.92, 165, 10};
  trainingSet.add(MALE, m1);
  trainingSet.add(MALE, m2);
  trainingSet.add(MALE, m3);
  trainingSet.add(MALE, m4);
  
  std::array<double, 3> f1 = {5, 100, 6};
  std::array<double, 3> f2 = {5.5, 150, 8};
  std::array<double, 3> f3 = {5.42, 130, 7};
  std::array<double, 3> f4 = {5.75, 150, 9};
  trainingSet.add(FEMALE, f1);
  trainingSet.add(FEMALE, f2);
  trainingSet.add(FEMALE, f3);
  trainingSet.add(FEMALE, f4);
  
  std::array<double, 3> sample1 = {5, 120, 7.7};
  std::array<double, 3> sample2 = {6.3, 172, 11.5};
  
  NaiveBayesClasifier<Type, 3> clasifier;
  EXPECT_TRUE(clasifier.train(trainingSet));
  
  EXPECT_EQ(clasifier.clasify(sample1), FEMALE);
  EXPECT_EQ(clasifier.clasify(sample2), MALE);
}
Code Example #5
File: Interface.cpp Project: initial-d/smt_server
float CalculateBP(const char * reffn, const char * trainfn, int refNum, int ngram, int bleuType)
{
    TrainingSet * ts = new TrainingSet();
    if( bleuType < 3 )
        ts->LoadRefData(reffn, refNum, ngram, MAX_SENT_NUM);
    ts->LoadTrainingData(trainfn, false);
    float BP = trainer->GetBP(ts, ngram, (BLEU_TYPE)bleuType, 0);
    delete ts;
    return BP;
}
Code Example #6
File: LogLikelihood.C Project: bmajoros/BioMaLL
LogLikelihood::LogLikelihood(TrainingSet &positives,TrainingSet &negatives)
  : positives(positives),
    negatives(negatives),
    numPos(positives.numCases()),
    numNeg(negatives.numCases()),
    numAttributes(positives.getSchema().numAttributes()),
    func(positives.getSchema().numAttributes()),
    ok(true)
{
}
Code Example #7
TEST(TrainingSetTest, test1) { 
    enum Bike {
        MOUNTAIN_BIKE,
        FAST_BIKE
    };
    TrainingSet<Bike, 2> trainingSet;
    std::array<double, 2> t1 = {1, 2};
    std::array<double, 2> t2 = {3, 4};
    std::array<double, 2> t3 = {5, 6};
    std::array<double, 2> t4 = {7, 8};
    trainingSet.add(MOUNTAIN_BIKE, t1);
    trainingSet.add(MOUNTAIN_BIKE, t2);
    trainingSet.add(FAST_BIKE, t3);
    trainingSet.add(FAST_BIKE, t4);
    
    std::vector<double> expected1 = {1, 3};
    std::vector<double> expected2 = {2, 4};
    
    EXPECT_TRUE(trainingSet.hasMoreTypes());
    EXPECT_EQ(trainingSet.getNextType(), MOUNTAIN_BIKE);
    EXPECT_TRUE(trainingSet.getNextFeature() == expected1);
    EXPECT_TRUE(trainingSet.getNextFeature() == expected2);
    EXPECT_TRUE(trainingSet.hasMoreTypes());
    EXPECT_EQ(trainingSet.getNextType(), FAST_BIKE);
    EXPECT_FALSE(trainingSet.hasMoreFeatures());
}
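Note that, judging from the expected values, getNextFeature yields one feature dimension at a time across the samples of the current type: {1, 3} are the first components of t1 and t2, and {2, 4} their second components, rather than a single sample's feature vector.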
Code Example #8
File: Interface.cpp Project: initial-d/smt_server
bool MERTraining(const char * reffn, const char * trainfn, const char * configfn, int ngram, int bleuType)
{
    TrainingSet * ts = new TrainingSet();
    if( bleuType < 3 )
        ts->LoadRefData(reffn, 4, ngram, MAX_SENT_NUM);
    ts->LoadTrainingData(trainfn, false);
    trainer->LoadPara(configfn);
    trainer->OptimzieWeightsWithMERT(ts, ngram, (BLEU_TYPE)bleuType, 0);
    delete ts;
    return true;
}
Code Example #9
File: ClassificationTree.cpp Project: mrthat/cvpr
void	ClassificationTree::print_train_log(const TreeNode::PtrLeafNodeBase leaf, const TrainingSet &train_set) const
{
	cv::Mat_<double>	label_dist;

	train_set.compute_target_mean(label_dist);

	printf("leaf dist\n");

	for (unsigned ii = 0; ii < label_dist.total(); ++ii) {
		printf("\tlabel%d:%f\n", ii, label_dist.at<double>(ii) / std::max<double>((double)train_set.size(), 1.0));
	}
}
Code Example #10
void CLTreeTrainer<ImgType, nChannels, FeatType, FeatDim, nClasses>::train(
  Tree<FeatType, FeatDim, nClasses> &tree,
  const TrainingSet<ImgType, nChannels> &trainingSet,
  const TreeTrainerParameters<FeatType, FeatDim> &params,
  unsigned int startDepth, unsigned int endDepth)
{
  /** \todo support a starting depth different from 1 */
  if (startDepth!=1) throw "Starting depth must be equal to 1";

  _initTrain(tree, trainingSet, params, startDepth, endDepth);
  

  for (unsigned int currDepth=startDepth; currDepth<endDepth; currDepth++)
  {
    boost::chrono::steady_clock::time_point perLevelTrainStart = 
      boost::chrono::steady_clock::now(); 

    unsigned int frontierSize = _initFrontier(tree, params, currDepth);
    unsigned int nSlices = _initHistogram(params);

    
    if (nSlices>1)
    {
      BOOST_LOG_TRIVIAL(info) << "Maximum allowed global histogram size reached: split in "
			      << nSlices << " slices";
    }
    

    // Flag all images as to-be-skipped: the flag will be set to false if at least one
    // image pixel is processed
    std::fill_n(m_toSkipTsImg, trainingSet.getImages().size(), true);

    for (unsigned int i=0; i<nSlices; i++)
    {
      _traverseTrainingSet(trainingSet, params, currDepth, i);
      _learnBestFeatThr(tree, params, currDepth, i);
    }

    // Update skipped images flags
    std::copy(m_toSkipTsImg, m_toSkipTsImg+trainingSet.getImages().size(), m_skippedTsImg);
  

    boost::chrono::duration<double> perLevelTrainTime =
      boost::chrono::duration_cast<boost::chrono::duration<double> >(boost::chrono::steady_clock::now() - 
								   perLevelTrainStart);
    
    BOOST_LOG_TRIVIAL(info) << "Depth " << currDepth << " trained in "
			    << perLevelTrainTime.count() << " seconds";
    
  }

  _cleanTrain();
}
Code Example #11
File: DataSet.cpp Project: felipesfaria/FariaTcc
void DataSet::InitFoldSets(TrainingSet &ts, ValidationSet &vs, int fold)
{
	int vStart = nSamples*(fold - 1) / nFolds;
	int vEnd = nSamples*fold / nFolds;
	ts.Init(nSamples - (vEnd - vStart), nFeatures);
	vs.Init((vEnd - vStart), nFeatures);
	for (int i = 0; i < nSamples; i++)
	{
		if (i >= vStart&&i<vEnd)
			vs.PushSample(X[i], Y[i]);
		else
			ts.PushSample(X[i], Y[i]);
	}
}
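InitFoldSets assigns the fold-th contiguous slice of the samples (folds are numbered from 1) to the validation set and the remaining samples to the training set. A typical cross-validation driver around it, sketched under the assumption that TrainingSet and ValidationSet are default-constructible and that nFolds is accessible to the caller, might look like:

// Hypothetical k-fold driver around DataSet::InitFoldSets (illustration only).
for (int fold = 1; fold <= nFolds; ++fold)
{
	TrainingSet ts;
	ValidationSet vs;
	dataSet.InitFoldSets(ts, vs, fold); // fold-th slice -> validation, rest -> training
	// ... train on ts, evaluate on vs, accumulate the score ...
}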
Code Example #12
void PruebaPantalla::on_DotMatrixRepresentationButton_clicked()
{
	QString openDir = QFileDialog::getOpenFileName(this, //widget
												  "Abrir conjunto de entrenamiento", //caption
												  "/home/edixon/programacion/INSYDE//samples/TrainingSets", //dir
												  "Conjunto de entrenamiento (*.tsf)", //filter
												  new QString("Conjunto de entrenamiento (*.tsf)"));

	if(openDir == "") return;

	TrainingSet *ts = new TrainingSet(openDir);

	DotMatrixRepresentation *dmr = new DotMatrixRepresentation(ts->getInputs()[0]);

	dmr->show();
}
Code Example #13
File: ClassificationTree.cpp Project: mrthat/cvpr
double	ClassificationTree::calc_entropy_gain(const TrainingSet &train_set, const TrainingSet &left_set, const TrainingSet &right_set, const RandomizedTreeParameter &param) const
{
	double		entropy_gain	=	0;

	entropy_gain	=	train_set.compute_label_entropy()
		- left_set.size() / (float)train_set.size() * left_set.compute_label_entropy()
		- right_set.size() / (float)train_set.size() * right_set.compute_label_entropy();

	return entropy_gain;
}
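In other words, writing H(·) for the label entropy returned by compute_label_entropy, the function returns the standard information gain of the split, IG = H(S) - (|S_L|/|S|)·H(S_L) - (|S_R|/|S|)·H(S_R); a larger value means the split separates the class labels more cleanly.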
Code Example #14
File: ClassificationTree.cpp Project: mrthat/cvpr
bool	ClassificationTree::is_end_growth(const TrainingSet &train_set, const cvpr::RandomizedTreeParameter &param, unsigned tree_height) const
{
	if (param.max_height <= tree_height) {
		return true;
	}

	if (train_set.size() <= param.min_samples) {
		return true;
	}

	return false;
}
Code Example #15
void Apta::build(TrainingSet trainingSet, bool useWhiteNodes)
{
    if (useWhiteNodes) {
        this->_useWhiteNodes = true;
    }

    // Start with the root of APTA colored red
    this->_addNode(true, this->_rootId, "", "", '\0');

    for (pair<string, bool> sample : trainingSet.get()) {
        this->_addPath(this->_rootId, sample.first,
            sample.second ? Apta::ACCEPTED : Apta::REJECTED);
    }
}
Code Example #16
File: SimpFnExtGPU.cpp Project: AtnesNess/annetgpgpu
int main(int argc, char *argv[]) {
	QApplication a(argc, argv);

	TrainingSet input;
	input.AddInput(red);
	input.AddInput(green);
	input.AddInput(dk_green);
	input.AddInput(blue);
	input.AddInput(dk_blue);
	input.AddInput(yellow);
	input.AddInput(orange);
	input.AddInput(purple);
	input.AddInput(black);
	input.AddInput(white);

	std::vector<float> vCol(3);
	int w1 = 40;
	int w2 = 4;

	SOMNetGPU gpu;
	gpu.CreateSOM(3, 1, w1,w1);
	gpu.SetTrainingSet(input);
	
	SetFcn(&ownFn);
	gpu.SetDistFunction(ownFn);
	// or just: SetFcn(gpu.GetDistFunction() );

	gpu.Training(1000);

	SOMReader w(w1, w1, w2);
	for(int x = 0; x < w1*w1; x++) {
		SOMNeuron *pNeur = (SOMNeuron*)((SOMLayer*)gpu.GetOPLayer())->GetNeuron(x);
		vCol[0] = pNeur->GetConI(0)->GetValue();
		vCol[1] = pNeur->GetConI(1)->GetValue();
		vCol[2] = pNeur->GetConI(2)->GetValue();

		w.SetField(QPoint(pNeur->GetPosition()[0], pNeur->GetPosition()[1]), vCol );
	}
	w.Save("SimpFnExtByGPU.png");
	return 0;
}
Code Example #17
void NeuralNetwork::test(TrainingSet &testSet)
{
	vector<InputImage *>* data = testSet.getData();

	int numCorrect = 0;
	for (vector<InputImage *>::iterator testImage = data->begin(); testImage != data->end(); ++testImage)
	{
		Mat *trainingImageMat = (*testImage)->getImage();
		vector<int> *actualLabel = (*testImage)->getLabelVector();

		// Get V
		Mat V = parameters * (*trainingImageMat);

		// Compute prediction
		vector<float> predictions(LABEL_SIZE);
		predictHelper(V, predictions);

		// Find max for prediction
		float max = 0;
		int maxInd = 0;
		int count = 0;
		for (vector<float>::iterator it = predictions.begin(); it != predictions.end(); ++it)
		{
			if (*it > max)
			{
				max = *it;
				maxInd = count;
			}
			count++;
		}

		char predictedChar = InputImage::oneHotIndexToChar(maxInd);
		cout << "Predicted: " << predictedChar << " | Actual: " << (*testImage)->getCharLabel() << endl;
		if (tolower(predictedChar) == tolower((*testImage)->getCharLabel()))
		{
			numCorrect++;
		}
	}

	float percentCorrect = ((float)numCorrect / (float)data->size()) * 100;
	cout << "Percent correct: " << (int)percentCorrect << "%" << endl;
}
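The manual maximum search over predictions is equivalent to a standard-library argmax; a sketch (assuming <algorithm> and <iterator> are included, and that the predictions are non-negative scores so both versions select the same index):

// Equivalent argmax over the prediction vector (illustrative only).
auto maxIt = std::max_element(predictions.begin(), predictions.end());
int maxInd = static_cast<int>(std::distance(predictions.begin(), maxIt));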
Code Example #18
void NeuralNetwork::train(TrainingSet &trainingSet)
{
	vector<InputImage *>* data = trainingSet.getData();

	vector<float> G;

	// Repeat until convergence
	bool hasConverged = false;
	int count = 0;
	float avgCrossEntropy = 100;
	time_t timer;
	time(&timer);
	int k = 0;
	while (!hasConverged)
	{
		if (count > MIN_TRAIN_TIME)
		{
			hasConverged = true;
			break;
		}
		count++;

		if (count % 5 == 0)
		{
			cout << count << "th cycle with " << avgCrossEntropy << " avg cross entropy" << endl;
			cout << difftime(time(0), timer) << " seconds elapsed" << endl;
		}

		// Reset average crossentropy
		avgCrossEntropy = 0;

		// Get predictions
		vector<vector<float>> allPredictions;
		vector<InputImage *> inputImages;
		for (int m = k; m < k + BATCH_SIZE; ++m)
		{
			int ind = m % data->size();

			Mat *trainingImageMat = data->at(ind)->getImage();
			vector<int> *actualLabel = data->at(ind)->getLabelVector();

			// Get V
			Mat V = parameters * (*trainingImageMat);

			// Compute prediction
			vector<float> predictions(LABEL_SIZE);
			predictHelper(V, predictions);

			avgCrossEntropy -= (logf(predictions[data->at(ind)->getLabelIndex()]));

			allPredictions.push_back(predictions);
			inputImages.push_back(data->at(ind));
		}

		// Update parameters
		for (int i = 0; i < parameters.rows; ++i)
		{
			for (int j = 0; j < parameters.cols; ++j)
			{
				float grad = 0;
#pragma omp parallel for reduction(+:grad)
				for (int p = 0; p < BATCH_SIZE; p++)
				{
					grad += inputImages.at(p)->getImage()->at<float>(j, 0) * (inputImages.at(p)->getLabelVector()->at(i) - allPredictions[p][i]);
				}

				parameters.at<float>(i, j) += TRAINING_STEP * grad;
			}
		}

		// Average the cross entropy
		avgCrossEntropy /= BATCH_SIZE;

		k += BATCH_SIZE;
	}

	// Save to file
	ofstream nnsave;
	nnsave.open("savednn.txt");
	for (int i = 0; i < parameters.rows; ++i)
	{
		for (int j = 0; j < parameters.cols; ++j)
		{
			nnsave << parameters.at<float>(i, j) << "\t";
		}
		nnsave << endl;
	}
	nnsave << endl;
	nnsave.close();

	//cout << parameters << endl;
}
Code Example #19
void BackPropagation::TrainNeuralNetwork(LayeredFeedForwardNeuralNet& networkToTrain, const TrainingSet& trainingSet) const
{
    long trainingIterations = 0;
    double cumulativeNetworkError = DBL_MAX;
    TrainingSet trainingSetCopy = trainingSet;
    
    // get activation derivative function for delta rule
    std::shared_ptr<IUnaryExpressionParser> pExpressionParser = UnaryExpressionParserFactory::CreateDerivativeParser();
    UnaryFunction activationDerivative = pExpressionParser->GetFunctionForExpression(networkToTrain.GetActivationFunction());
    
    while (cumulativeNetworkError > m_targetNetworkError && trainingIterations < m_iterationLimit)
    {
        std::cout << "Enet = " << cumulativeNetworkError << std::endl;
        
        // reset network error for new training set iteration.
        cumulativeNetworkError = 0.0;
        
        // begin a new training cycle: put exemplars in random order
        std::random_shuffle(trainingSetCopy.begin(), trainingSetCopy.end());
        
        for (const Exemplar& exemplar : trainingSetCopy)
        {
            // fire the neural network and record activations at each layer
            std::vector<VectorXd> layerActivations;
            layerActivations.push_back(exemplar.first);
            for (long layerIndex = 1; layerIndex < networkToTrain.GetLayerCount(); layerIndex++)
            {
                layerActivations.push_back(
                    networkToTrain.FireSingleLayer(layerActivations[layerIndex - 1], layerIndex)
                );
            }
            
            // deque of errors on each layer (so we can add in reverse order)
            std::deque<VectorXd> layerErrors;
            
            // iterate over the layers in reverse order (back propagating), calculating errors.
            // reverse order because error in below layers is dependent on error of above layers.
            for (long layerIndex = networkToTrain.GetLayerCount() - 1; layerIndex > 0; layerIndex--)
            {
                VectorXd currentLayerError; // what we're trying to calculate
                const VectorXd& currentLayerActivation = layerActivations[layerIndex];
                
                if (layerIndex == networkToTrain.GetLayerCount() - 1)
                {
                    // this is the output layer's error, which is calculated against the known exemplar expected output
                    // Eo = (Do - Yo)Yo([1_0..1_n] - Yo)    for sigmoid (we use generalised delta rule and derivative of activation fn)
                    const VectorXd& expectedOutputPattern = exemplar.second;
                    currentLayerError = (expectedOutputPattern - currentLayerActivation).cwiseProduct(currentLayerActivation.unaryExpr(activationDerivative));
                } else {
                    // this is a hidden layer error vector, which is calculated against the above layer's error and input weights.
                    // Ehy = Yh(1 - Yh)Wi^T.Eo    for sigmoid (we use generalised delta rule and derivative of activation fn)
                    MatrixXd aboveLayerInputWeights = networkToTrain.GetLayerInputWeights(layerIndex + 1);
                    const VectorXd& aboveLayerError = layerErrors.front();
                    // when calculating hidden layer errors we don't care about bias weights for the input weights of the above layer.
                    // this is because the "error of the bias unit" in a hidden layer is not used to calculate changes in weights below. so get rid of these to simplify calculation.
                    MatrixXd aboveLayerInputWeightsMinusBias = aboveLayerInputWeights.leftCols(aboveLayerInputWeights.cols() - 1);
                    // note: we use cwiseProduct because we want to multiply elements of the weighted error vector against the derivative of the current layer activations.
                    currentLayerError = (aboveLayerInputWeightsMinusBias.transpose() * aboveLayerError).cwiseProduct(currentLayerActivation.unaryExpr(activationDerivative));
                }
                layerErrors.push_front(currentLayerError);
            }
            // push a dummy 0 error to error deque so error/activation stl vector elements line up.
            layerErrors.push_front(VectorXd::Zero(exemplar.first.size()));
            
            // next we need to iterate over errors for each layer (excluding dummy input layer), calculating change in input weights.
            for (long layerIndex = 1; layerIndex < networkToTrain.GetLayerCount(); layerIndex++)
            {
                // get weight matrix to adjust
                MatrixXd weightsToAdjust = networkToTrain.GetLayerInputWeights(layerIndex);
                
                // get previous layer's activations (plus bias value)
                VectorXd previousLayerActivationPlusBias(weightsToAdjust.cols());
                previousLayerActivationPlusBias << layerActivations[layerIndex - 1], VectorXd::Constant(1, -1.0);
                
                // calculate change in weights ΔW = η Yh^T . Eo (where . is outer product)
                MatrixXd layerInputWeightsDelta = (layerErrors[layerIndex] * previousLayerActivationPlusBias.transpose()) * m_learningRate;
                
                // update neural net weights
                weightsToAdjust += layerInputWeightsDelta;
                networkToTrain.SetLayerInputWeights(weightsToAdjust, layerIndex);
                
                std::cout << "Weights for layer " << layerIndex << " are now:" << std::endl;
                std::cout << weightsToAdjust << std::endl;
            }
            
            // ok now update the cumulative network error.
            // this is (expected - actual activations) normalised, squared and then halved.
            cumulativeNetworkError += (exemplar.second - layerActivations.back()).squaredNorm() / 2;
            
        } // end for training-set-iteration
        trainingIterations++;
    } // target reached (or iteration limit exceeded). end training.
    
    if (trainingIterations == m_iterationLimit)
    {
        std::cout << "Iteration limit reached - optimisation did not converge on a global minimum." << std::endl;
    } else {
        std::cout << "Target network error reached after " << trainingIterations << " training set iterations." << std::endl;
    }
}
Code Example #20
File: WINMAIN.CPP Project: amirna2/fingerprints
//*******************************************************************
// WinMain - Neural main
//
// parameters:
//             hInstance     - The instance of this instance of this
//                             application.
//             hPrevInstance - The instance of the previous instance
//                             of this application. This will be 0
//                             if this is the first instance.
//             lpszCmdLine   - A long pointer to the command line that
//                             started this application.
//             cmdShow       - Indicates how the window is to be shown
//                             initially. ie. SW_SHOWNORMAL, SW_HIDE,
//                             SW_MIMIMIZE.
//
// returns:
//             wParam from last message.
//
//*******************************************************************
int PASCAL WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
						 LPSTR lpszCmdLine, int cmdShow)
{


/*
	Declarations of local variables
*/

	int control_file_number = -1 ;           // Stack pointer for control files
	FILE *control_files[MAX_CONTROL_FILES] ; // This is the stack

	char *control_line ;    // User's commands here
	char *command, *rest ;  // Pointers to its command and parameter parts
	int n_command, n_rest ; // Lengths of those parts

	int net_model = -1 ;     // Network model (see NETMOD_? in CONST.H)
	int out_model = -1 ;     // Output model (see OUTMOD_? in CONST.H)
	int n_inputs = -1 ;      // Number of input neurons
	int n_outputs = -1 ;     // Number of output neurons
	int n_hidden1 = -1 ;     // Number of hidden layer one neurons
	int n_hidden2 = -1 ;     // Ditto layer 2 (0 if just one hidden layer)


	TrainingSet *tset = NULL ;            // Training set here
	Network *network = NULL ;             // Network here
	struct LearnParams learn_params ;     // General learning parameters
	struct AnnealParams anneal_params ;   // Simulated annealing parameters
	struct GenInitParams geninit_params ; // Genetic initialization parameters
	struct KohParams koh_params ;         // Kohonen parameters

	int classif_output = -1 ;  // Current class (0=reject) for classif training
	char out_file[80] = "" ;   // File for EXECUTE output
	float threshold ;         // CLASSIFY confusion reject cutoff
	char resp_file[80]="";     // file for initializing output neuron's name
	char train_file[80]="";
/*
	Miscellaneous variables
*/

	int i, n, m ;
	float p ;
	char *msg ;
	FILE *fp ;
	unsigned long me,mc;
	char *fname;
	char *control;

#if VERSION_16_BIT
	if (sizeof(int) > 2) {
		printf ( "\nRecompile with VERSION_16_BIT set to 0 in CONST.H" ) ;
		exit ( 1 ) ;
		}
#else
	if (sizeof(int) < 4) {
		printf ( "\nRecompile with VERSION_16_BIT set to 1 in CONST.H" ) ;
		exit ( 1 ) ;
		}
#endif


printf ( "\nNEURAL SYSTEM - Program to train and test neural networks" ) ;

if (lpszCmdLine && lpszCmdLine[0])
{
  fname = lpszCmdLine ;   // use the command-line argument as the initial control file name
}
else
  fname = NULL ;


/*
   Process command line parameters
*/

   mem_name[0] = 0 ;  // Default is no memory allocation file
 /*
   if (strlen ( mem_name )) {
      strcat ( mem_name , ":mem.log" ) ;
      fp = fopen ( mem_name , "wt" ) ;
      if (fp == NULL) {
	 printf ( "\nCannot open debugging file %s", mem_name ) ;
	 exit ( 1 ) ;
	 }
      fclose ( fp ) ;
      mem_log = 1 ;
      }
   else
      mem_log = 0 ;
   */
   mem_log  = 0 ;
   mem_used = 0 ;
/*
   Initialize defaults
*/

   learn_params.init = -1 ;
   learn_params.quit_err = 0.0 ;
   learn_params.retries = 32767 ;

   anneal_params.temps0 = 3 ;
   anneal_params.temps = 4 ;
   anneal_params.iters0 = 50 ;
   anneal_params.iters = 20 ;
   anneal_params.setback0 = 50 ;
   anneal_params.setback = 20 ;
   anneal_params.start0 = 3.0 ;
   anneal_params.start = 4.0 ;
   anneal_params.stop0 = 1.0 ;
   anneal_params.stop = 0.02 ;

   geninit_params.pool = 50 ;
   geninit_params.gens = 3 ;
   geninit_params.climb = 0 ;
   geninit_params.overinit = 1.5 ;
   geninit_params.pcross = 0.8 ;
   geninit_params.pmutate = 0.0001 ;

   koh_params.normalization = 0 ;  // 0=multiplicative, 1=Z
   koh_params.learn_method = 1 ;   // 0=additive, 1=subtractive
   koh_params.rate = 0.4 ;         // learning rate
   koh_params.reduction = 0.99 ;   // learning rate reduction

   learn_params.ap = &anneal_params ;
   learn_params.gp = &geninit_params ;
   learn_params.kp = &koh_params ;


   act_func_init () ; // Initialize interpolation table for activation function

   MEMTEXT ( "NEURAL: control_line, msg" ) ;
   if (((control_line = (char *) MALLOC ( CONTROL_LINE_LENGTH+1 )) == NULL)
    || ((msg = (char *) MALLOC ( CONTROL_LINE_LENGTH+1 )) == NULL)) {
      printf ( "\nInsufficient memory" ) ;
      exit ( 1 ) ;
      }

/*
   Main loop processes all commands
*/

   for (;;) {
      if (fname)
      {
	 strcpy(control_line,"CONTROL:");
	 strcat(control_line,fname);
	 //printf("%s\n",control_line);
	 fname = NULL ;
      }
      else
	 get_control_line ( control_line , &control_file_number, control_files ) ;

      split_control_line ( control_line , &command , &n_command ,
			   &rest , &n_rest ) ;

      if (! n_command) {
	 if (n_rest) {
	    sprintf ( msg , "No colon after command: %s", rest ) ;
	    error_message ( msg ) ;
	    }
	 continue ;
	 }

      sprintf ( msg , "%s : %s", command, rest ) ;
      normal_message ( msg ) ;

/*
   Act on the command
*/

      if (! strcmp ( command , "QUIT" ))
	 break ;

      if (! strcmp ( command , "CONTROL" )) {
	 stack_control_file (rest, &control_file_number, control_files) ;
	 continue ;
	 }

      if (! strcmp ( command , "NETWORK MODEL" )) {
	 // Multi layer network
	 if (! strcmp ( rest , "LAYER" ))
	    n = NETMOD_LAYER ;
	 // Kohonen network
	 else if (! strcmp ( rest , "KOHONEN" ))
	    n = NETMOD_KOH ;
	 // Hopfield network
	 else if (! strcmp ( rest , "HOPFIELD" ))
	    n = NETMOD_HOP ;
	 // Bidirectional associative memory network
	 else if (! strcmp ( rest , "BAM" ))
	    n = NETMOD_BAM ;

	 else {
	    sprintf ( msg , "Illegal NETWORK MODEL: %s", rest ) ;
	    error_message ( msg ) ;
	    continue ;
	    }
	 if (net_model == n)
	    continue ;
	 if (ok_to_clear_weights( &network )) {
	    net_model = n ;
	    learn_params.init = -1 ;
	    }
	 else
	    warning_message ( "Command aborted" ) ;
	 continue ;
	 }

      if (! strcmp ( command , "OUTPUT MODEL" )) {
	 if (! strcmp ( rest , "CLASSIFY" ))
	    n = OUTMOD_CLASSIFY ;
	 else if (! strcmp ( rest , "AUTO" ))
	    n = OUTMOD_AUTO ;
	 else if (! strcmp ( rest , "GENERAL" ))
	    n = OUTMOD_GENERAL ;
	 else {
	    sprintf ( msg , "Illegal OUTPUT MODEL: %s", rest ) ;
	    error_message ( msg ) ;
	    continue ;
	    }
	 if (out_model == n)
	    continue ;
	 if ((ok_to_clear_tset( &tset )) && (ok_to_clear_weights( &network)))
	    out_model = n ;
	 else
	    warning_message ( "Command aborted" ) ;
	 continue ;
	 }

      if (! strcmp ( command , "N INPUTS" )) {
	 m = sscanf ( rest , "%d" , &n ) ;
	 if ((m <= 0)  ||  (n <= 0)  ||  (n > MAX_INPUTS)) {
	    sprintf ( msg , "Illegal N INPUTS: %s", rest ) ;
	    error_message ( msg ) ;
	    continue ;
	    }
	 if (n_inputs == n)
	    continue ;
	 if ((ok_to_clear_tset( &tset)) && (ok_to_clear_weights(&network)))
	    n_inputs = n ;
	 else
	    warning_message ( "Command aborted" ) ;
	 continue ;
	 }

      if (! strcmp ( command , "N OUTPUTS" )) {
	 m = sscanf ( rest , "%d" , &n ) ;
	 if ((m <= 0)  ||  (n <= 0)  ||  (n > MAX_OUTPUTS)) {
	    sprintf ( msg , "Illegal N OUTPUTS: %s", rest ) ;
	    error_message ( msg ) ;
	    continue ;
	    }
	 if (n_outputs == n)
	    continue ;
	 if ((ok_to_clear_tset( &tset)) && (ok_to_clear_weights(&network)))
	    n_outputs = n ;
	 else
	    warning_message ( "Command aborted" ) ;
	 continue ;
	 }

      if (! strcmp ( command , "N HIDDEN1" )) {
	 m = sscanf ( rest , "%d" , &n ) ;
	 if ((m <= 0)  ||  (n < 0)  ||  (n > MAX_HIDDEN)) {
	    sprintf ( msg , "Illegal N HIDDEN1: %s", rest ) ;
	    error_message ( msg ) ;
	    continue ;
	    }
	 if (n_hidden1 == n)
	    continue ;
	 if (ok_to_clear_weights( &network ))
	    n_hidden1 = n ;
	 else
	    warning_message ( "Command aborted" ) ;
	 continue ;
	 }

      if (! strcmp ( command , "N HIDDEN2" )) {
	 m = sscanf ( rest , "%d" , &n ) ;
	 if ((m <= 0)  ||  (n < 0)  ||  (n > MAX_HIDDEN)) {
	    sprintf ( msg , "Illegal N HIDDEN2: %s", rest ) ;
	    error_message ( msg ) ;
	    continue ;
	    }
	 if (n  &&  ! n_hidden1) {
	    error_message ( "N HIDDEN2 must be 0 if N HIDDEN1 IS 0." ) ;
	    continue ;
	    }
	 if (n_hidden2 == n)
	    continue ;
	 if (ok_to_clear_weights( &network ))
	    n_hidden2 = n ;
	 else
	    warning_message ( "Command aborted" ) ;
	 continue ;
	 }

      if (! strcmp ( command , "TRAIN" )) {
	 if ((out_model == OUTMOD_AUTO)  &&  (n_outputs != n_inputs)) {
	    warning_message ( "Setting N OUTPUTS = N INPUTS" ) ;
	    n_outputs = n_inputs ;
	    }
	 if (out_model <= 0)
	    error_message ( "TRAIN used before OUTPUT MODEL set." ) ;
	 else if (n_inputs <= 0)
	    error_message ( "TRAIN used before N INPUTS set." ) ;
	 else if (n_outputs <= 0)
	    error_message ( "TRAIN used before N OUTPUTS set." ) ;
	 else if ((net_model == NETMOD_HOP) && (n_inputs != n_outputs))
	    error_message("HOPFIELD netowork requires INPUTS = OUTPUTS.");
	 else if ((net_model == NETMOD_BAM) && (out_model != OUTMOD_GENERAL))
	    error_message("BAM network requires AUTO output mode.");
	 else if ((net_model == NETMOD_HOP) && (out_model != OUTMOD_AUTO))
	    error_message("HOFIELD network requires AUTO output mode.");
	 else if ((net_model != NETMOD_KOH) && (out_model == OUTMOD_CLASSIFY)
		  &&  (classif_output < 0))
	    error_message( "CLASSIFY output mode but CLASSIFY OUTPUT not set.");
	 else if ((net_model == NETMOD_KOH)  &&  (out_model != OUTMOD_CLASSIFY))
	    error_message( "KOHONEN network requires CLASSIFY output mode.");
	 else {
	    if (tset == NULL) {
	       MEMTEXT ( "NEURAL: new tset" ) ;
	       tset = new TrainingSet ( out_model , n_inputs , n_outputs ) ;
	       }
	    tset->train ( rest , classif_output ) ;
	    strcpy(train_file,rest);
	 }
	 continue ;
	 }

      if (check_anneal ( command , rest , &anneal_params ))
	 continue ;

      if (check_genetic ( command , rest , &geninit_params ))
	 continue ;

      if (check_kohonen ( command , rest , &koh_params , &network ))
	 continue ;

      if (check_learn_params ( command , rest , &learn_params , net_model ))
	 continue ;

      if (! strcmp ( command , "LEARN" )) {
	 if ((tset == NULL)  ||  (tset->ntrain == 0)) {
	    error_message ( "Cannot LEARN; No training set exists." ) ;
	    continue ;
	    }
	 if ((net_model == NETMOD_KOH)  &&  (out_model != OUTMOD_CLASSIFY)) {
	    error_message( "KOHONEN network requires CLASSIFY output mode.");
	    continue ;
	    }
	 if (learn_params.init < 0) {
	    error_message( "Initialization method not set.");
	    continue ;
	    }
	 if (network == NULL)
	 {
	    if (net_model == NETMOD_LAYER)
	    {
	       if (n_hidden1 < 0)
	       {
		  error_message ( "LEARN used before N HIDDEN1 set." ) ;
		  continue ;
	       }
	       else if (n_hidden2 < 0)
	       {
		  error_message ( "LEARN used before N HIDDEN2 set." ) ;
		  continue ;
	       }
	       else
	       {
		  MEMTEXT ( "NEURAL: new LayerNet" ) ;
		  network = new LayerNet ( out_model , n_inputs , n_hidden1 ,
					   n_hidden2 , n_outputs , 1 , 1 ) ;
	       }
	    }
	    else if (net_model == NETMOD_KOH)
	    {
	       MEMTEXT ( "NEURAL: new KohNet" ) ;
	       network = new KohNet ( n_inputs , n_outputs ,
				      &koh_params , 1 , 1 ) ;
	    }
	    else if (net_model == NETMOD_HOP)
	    {

	       MEMTEXT ( "NEURAL: new HopNet" );
	       network = new HopNet (n_inputs,n_outputs, 1,1);
	    }

	    else if (net_model == NETMOD_BAM)
	    {
	       MEMTEXT ("NEURAL: new BamNet");
	       network = new LayerNet ( out_model , n_inputs , n_hidden1 ,
					n_hidden2 , n_outputs , 1 , 1 ) ;

	    }
	 }
	 if ((network == NULL)  ||  (! network->ok)) {  // Malloc failure?
	    memory_message ( "to create network." ) ;
	    if (network != NULL) {
	       delete network ;
	       network = NULL ;
	       }
	    continue ;
	    }
	 normal_message("Learning...\n");
	 network->learn ( tset , &learn_params ) ;
	 normal_message("End of Learning\n");
	 if (network->neterr > 0.999999) {  // Indicates massive failure
	    MEMTEXT ( "NEURAL: learn failure delete network" ) ;
	    delete network ;
	    network = NULL ;
	    }
	 else {
	    sprintf ( msg , "Final error = %.4lf%% of max possible",
		      100.0 * network->neterr ) ;
	    normal_message ( msg ) ;
	    }
	 continue ;
	 }

      if (! strcmp ( command , "SAVE WEIGHTS" )) {
	 if (network == NULL)
	    error_message ( "There are no learned weights to save." ) ;
	 else
	    wt_save ( network , net_model , 0 , rest ) ;
	 continue ;
	 }

      if (! strcmp ( command , "RESTORE WEIGHTS" )) {
	 if (network != NULL) {
	    MEMTEXT ( "NEURAL: delete network for restore" ) ;
	    delete network ;
	    network = NULL ;
	    }
	 network = wt_restore ( rest , &net_model ) ;
	 if (network == NULL)
	    continue ;
	 if (tset != NULL) {
	    if ((tset->nin != network->nin)
	     || (tset->nout != network->nout)
	     || (tset->outmod != network->outmod)) {
	       error_message ( "Network conflicts with existing training set.");
	       continue ;
	       }
	    }
	 out_model = network->outmod ;
	 n_inputs = network->nin ;
	 n_outputs = network->nout ;
	 if (net_model == NETMOD_LAYER) {
	    n_hidden1 = ((LayerNet*) network)->nhid1 ;
	    n_hidden2 = ((LayerNet*) network)->nhid2 ;
	    }
	 if (net_model == NETMOD_KOH)
	    koh_params.normalization = ((KohNet *) network)->normalization ;
	 learn_params.init = -1 ;
	 continue ;
	 }

      if (! strcmp ( command , "CLEAR TRAINING" )) {
	 if (tset != NULL) {
	    MEMTEXT ( "NEURAL: delete tset" ) ;
	    delete tset ;
	    tset = NULL ;
	    }
	 continue ;
	 }

      if (! strcmp ( command , "CLEAR WEIGHTS" )) {
	 if (network != NULL) {
	    MEMTEXT ( "NEURAL: delete network" ) ;
	    delete network ;
	    network = NULL ;
	    }
	 continue ;
	 }

      if (! strcmp ( command , "CLASSIFY OUTPUT" )) {
	 if (net_model == NETMOD_KOH) {
	    error_message ( "Cannot specify output for KOHONEN model." ) ;
	    continue ;
	    }
	 if (n_outputs < 0) {
	    error_message ( "CLASSIFY OUTPUT used before N OUTPUTS set." ) ;
	    continue ;
	    }
	 if (out_model != OUTMOD_CLASSIFY) {
	    error_message
		  ( "CLASSIFY OUTPUT only valid when OUTPUT MODEL:CLASSIFY" ) ;
	    continue ;
	    }
	 m = sscanf ( rest , "%d" , &n ) ;
	 if ((m <= 0)  ||  (n < 0)) {
	    sprintf ( msg , "Illegal CLASSIFY OUTPUT: %s", rest ) ;
	    error_message ( msg ) ;
	    }
	 else if (n > n_outputs) {
	    sprintf ( msg , "CLASSIFY OUTPUT (%d) exceeds N OUTPUTS (%d)",
		      n, n_outputs ) ;
	    error_message ( msg ) ;
	    }
	 else
	    classif_output = n ;
	 continue ;
	 }

      if (! strcmp ( command , "OUTPUT FILE" )) {
	 strcpy ( out_file , rest ) ;
	 continue ;
	 }

      if (! strcmp ( command , "EXECUTE" ))
      {
	 if (network == NULL)
	    error_message ( "There is no trained network" ) ;
	 else
	 {
	    network->execute_from_file ( rest , out_file) ;
	    continue ;
	 }
      }

      if (! strcmp ( command , "TEST NETWORK" ))
      {
	 if (network == NULL)
	    error_message ( "There is no trained network" ) ;
	 else
	 {
	    network->test_from_file ( rest ,out_file,net_model) ;
	    continue ;
	 }
      }

      if (! strcmp ( command , "CLASSIFY" )) {
	 if (network == NULL)
	    error_message ( "There is no trained network" ) ;
	 else if (out_model != OUTMOD_CLASSIFY)
	    error_message ( "CLASSIFY valid only in CLASSIFY output mode" ) ;
	 else
	    network->classify_from_file ( rest , threshold ) ;
	 continue ;
	 }

      if (! strcmp ( command , "RESET CONFUSION" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else
            network->reset_confusion () ;
         continue ;
         }

      if (! strcmp ( command , "CONFUSION THRESHOLD" )) {
	 p = atof ( rest ) ;
	 if ((p < 0.0)  ||  (p > 100.0)) {
	    sprintf ( msg , "Illegal CONFUSION THRESHOLD: %s", rest ) ;
            error_message ( msg ) ;
            }
	 else
            threshold = p / 100.0 ;
         continue ;
         }

      if (! strcmp ( command , "SHOW CONFUSION" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else if (out_model != OUTMOD_CLASSIFY)
	    error_message ( "CONFUSION valid only in CLASSIFY output mode" ) ;
         else
            network->show_confusion () ;
         continue ;
	 }

      if (! strcmp ( command , "SAVE CONFUSION" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else if (out_model != OUTMOD_CLASSIFY)
            error_message ( "CONFUSION valid only in CLASSIFY output mode" ) ;
         else
            network->save_confusion ( rest ) ;
	 continue ;
         }

      sprintf ( msg , "Unknown command: %s", command ) ;
      error_message ( msg ) ;

      } // Endless command loop

   MEMTEXT ( "NEURAL: control_line, msg" ) ;
   FREE ( control_line ) ;
   FREE ( msg ) ;
   MEMCLOSE () ;
   exit ( 0 ) ;
}
Code Example #21
void CLTreeTrainer<ImgType, nChannels, FeatType, FeatDim, nClasses>::_initTrain(
  Tree<FeatType, FeatDim, nClasses> &tree,
  const TrainingSet<ImgType, nChannels> &trainingSet,
  const TreeTrainerParameters<FeatType, FeatDim> &params,
  unsigned int startDepth, unsigned int endDepth)
{
  unsigned int nNodes = (2<<(endDepth-1))-1;
  cl_int errCode;

  // Init OpenCL tree buffers and load corresponding data
  m_clTreeLeftChildBuff = cl::Buffer(m_clContext,
				     CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR,
				     nNodes*sizeof(cl_uint),
				     (void*)tree.getLeftChildren());
  m_clTreeFeaturesBuff = cl::Buffer(m_clContext,
				    CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR,
				    nNodes*sizeof(FeatType)*FeatDim,
				    (void*)tree.getFeatures());
  m_clTreeThrsBuff = cl::Buffer(m_clContext,
				CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR,
				nNodes*sizeof(FeatType),
				(void*)tree.getThresholds());
  m_clTreePosteriorsBuff = cl::Buffer(m_clContext,
				      CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR,
				      nNodes*sizeof(cl_float)*nClasses,
				      (void*)tree.getPosteriors());

  // Init per-node total and per-class number of samples
  m_perNodeTotSamples = new unsigned int[nNodes];
  m_perClassTotSamples = new unsigned int[nNodes*nClasses];
  std::fill_n(m_perNodeTotSamples, nNodes, 0);
  std::fill_n(m_perClassTotSamples, nNodes*nClasses, 0);

  // Init to-skip flags for training set images
  m_toSkipTsImg = new bool[trainingSet.getImages().size()];
  m_skippedTsImg = new bool[trainingSet.getImages().size()];
  std::fill_n(m_skippedTsImg, trainingSet.getImages().size(), false);

  // Init OpenCL training set image buffer:
  // - first of all, iterate through the training set and find the maximum
  //   image width/height
  m_maxTsImgWidth=0;
  m_maxTsImgHeight=0;
  m_maxTsImgSamples=0;
  m_perNodeTotSamples[0] = 0;
  const std::vector<TrainingSetImage<ImgType, nChannels> > &tsImages = trainingSet.getImages();
  for (typename std::vector<TrainingSetImage<ImgType, nChannels> >::const_iterator it=tsImages.begin();
       it!=tsImages.end(); ++it)
  {
    const TrainingSetImage<ImgType, nChannels> &currImage=*it;
    
    if (currImage.getWidth()>m_maxTsImgWidth) m_maxTsImgWidth=currImage.getWidth();
    if (currImage.getHeight()>m_maxTsImgHeight) m_maxTsImgHeight=currImage.getHeight();

    // Get maximum number of sampled pixels as well
    if (currImage.getNSamples()>m_maxTsImgSamples) m_maxTsImgSamples=currImage.getNSamples();

    /** \todo update the per-class total number of pixels at the root node here */
    m_perNodeTotSamples[0]+=currImage.getNSamples();
  }

  // Make the maximum width and height a multiple of the, respectively, work-group x and y
  // dimension
  m_maxTsImgWidth += (m_maxTsImgWidth%WG_WIDTH) ? WG_WIDTH-(m_maxTsImgWidth%WG_WIDTH) : 0;
  m_maxTsImgHeight += (m_maxTsImgHeight%WG_HEIGHT) ? WG_HEIGHT-(m_maxTsImgHeight%WG_HEIGHT) : 0;

  // - initialize OpenCL images
  cl::size_t<3> origin, region;
  size_t rowPitch;
  origin[0]=0; origin[1]=0; origin[2]=0;
  region[0]=m_maxTsImgWidth; region[1]=m_maxTsImgHeight;
  region[2]= (nChannels<=4) ? 1 : nChannels;

  cl::ImageFormat clTsImgFormat;
  ImgTypeTrait<ImgType, nChannels>::toCLImgFmt(clTsImgFormat);
  if (nChannels<=4)
  {
    m_clTsImg1 = new cl::Image2D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				 m_maxTsImgWidth, m_maxTsImgHeight);
    m_clTsImg2 = new cl::Image2D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				 m_maxTsImgWidth, m_maxTsImgHeight);
  }
  else
  {
    m_clTsImg1 = new cl::Image3D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				 m_maxTsImgWidth, m_maxTsImgHeight, nChannels);
    m_clTsImg2 = new cl::Image3D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				 m_maxTsImgWidth, m_maxTsImgHeight, nChannels);
  }
  m_clTsImgPinn = cl::Buffer(m_clContext,
			     CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR,
			     m_maxTsImgWidth*m_maxTsImgHeight*nChannels*sizeof(ImgType)*2);
  m_clTsImgPinnPtr = 
    reinterpret_cast<ImgType*>(m_clQueue1.enqueueMapBuffer(m_clTsImgPinn, CL_TRUE,
							   CL_MAP_WRITE,
							   0, m_maxTsImgWidth*m_maxTsImgHeight*nChannels*sizeof(ImgType)*2));

  clTsImgFormat.image_channel_order = CL_R;
  clTsImgFormat.image_channel_data_type = CL_UNSIGNED_INT8;
  region[2] = 1;
  m_clTsLabelsImg1 = cl::Image2D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				 m_maxTsImgWidth, m_maxTsImgHeight);
  m_clTsLabelsImg2 = cl::Image2D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				 m_maxTsImgWidth, m_maxTsImgHeight);
  m_clTsLabelsImgPinn = cl::Buffer(m_clContext,
				   CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR,
				   m_maxTsImgWidth*m_maxTsImgHeight*sizeof(cl_uchar)*2);
  m_clTsLabelsImgPinnPtr = 
    reinterpret_cast<unsigned char*>(m_clQueue1.enqueueMapBuffer(m_clTsLabelsImgPinn, CL_TRUE,
								 CL_MAP_WRITE,
								 0, m_maxTsImgWidth*m_maxTsImgHeight*sizeof(cl_uchar)*2));

  clTsImgFormat.image_channel_data_type = CL_SIGNED_INT32;
  m_clTsNodesIDImg1 = cl::Image2D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				  m_maxTsImgWidth, m_maxTsImgHeight);
  m_clTsNodesIDImg2 = cl::Image2D(m_clContext, CL_MEM_READ_ONLY, clTsImgFormat,
				  m_maxTsImgWidth, m_maxTsImgHeight);
  m_clTsNodesIDImgPinn = cl::Buffer(m_clContext,
				     CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR,
				     m_maxTsImgWidth*m_maxTsImgHeight*sizeof(cl_uint)*GLOBAL_HISTOGRAM_FIFO_SIZE);
  m_clTsNodesIDImgPinnPtr =
    reinterpret_cast<int*>(m_clQueue1.enqueueMapBuffer(m_clTsNodesIDImgPinn, CL_TRUE,
						       CL_MAP_READ|CL_MAP_WRITE,
						       0, m_maxTsImgWidth*m_maxTsImgHeight*sizeof(cl_uint)*GLOBAL_HISTOGRAM_FIFO_SIZE));

  m_clPredictImg1 = cl::Image2D(m_clContext, CL_MEM_WRITE_ONLY, clTsImgFormat,
				m_maxTsImgWidth, m_maxTsImgHeight);
  m_clPredictImg2 = cl::Image2D(m_clContext, CL_MEM_WRITE_ONLY, clTsImgFormat,
				m_maxTsImgWidth, m_maxTsImgHeight);
  
  // Init OpenCL buffers for per-image histogram computation
  FeatType *tmpFeatLowBounds = new FeatType[FeatDim];
  FeatType *tmpFeatUpBounds = new FeatType[FeatDim];
  std::copy(params.featLowBounds, params.featLowBounds+FeatDim, tmpFeatLowBounds);
  std::copy(params.featUpBounds, params.featUpBounds+FeatDim, tmpFeatUpBounds);
  m_clFeatLowBoundsBuff = cl::Buffer(m_clContext,
				     CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR,
				     FeatDim*sizeof(FeatType),
				     (void*)tmpFeatLowBounds);
  m_clFeatUpBoundsBuff = cl::Buffer(m_clContext,
				    CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR,
				    FeatDim*sizeof(FeatType),
				    (void*)tmpFeatUpBounds);

  m_clTsSamplesBuff1 = cl::Buffer(m_clContext,
				  CL_MEM_READ_ONLY,
				  m_maxTsImgSamples*sizeof(cl_uint));
  m_clTsSamplesBuff2 = cl::Buffer(m_clContext,
				  CL_MEM_READ_ONLY,
				  m_maxTsImgSamples*sizeof(cl_uint));
  m_clTsSamplesBuffPinn = cl::Buffer(m_clContext,
				     CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR,
				     m_maxTsImgSamples*sizeof(cl_uint)*2);
  m_clTsSamplesBuffPinnPtr =
    reinterpret_cast<unsigned int*>(m_clQueue1.enqueueMapBuffer(m_clTsSamplesBuffPinn, CL_TRUE,
								CL_MAP_WRITE,
								0, m_maxTsImgSamples*sizeof(cl_uint)*2));

  
  // Note:
  // - the 4D histogram (sample-ID, feature, class, threshold) can be compressed to 3D since we can access
  //   the sample class from the labels image
  size_t perImgHistogramSize = m_maxTsImgSamples*params.nFeatures*params.nThresholds;
  m_clPerImgHistBuff1 = cl::Buffer(m_clContext,
				   CL_MEM_WRITE_ONLY,
				   perImgHistogramSize*sizeof(cl_uchar));
  m_clPerImgHistBuff2 = cl::Buffer(m_clContext,
				   CL_MEM_WRITE_ONLY,
				   perImgHistogramSize*sizeof(cl_uchar));
  m_clPerImgHistBuffPinn = cl::Buffer(m_clContext,
				      CL_MEM_WRITE_ONLY|CL_MEM_ALLOC_HOST_PTR,
				      perImgHistogramSize*sizeof(cl_uchar)*GLOBAL_HISTOGRAM_FIFO_SIZE);
  m_clPerImgHistBuffPinnPtr =
    reinterpret_cast<unsigned char*>(m_clQueue1.enqueueMapBuffer(m_clPerImgHistBuffPinn, CL_TRUE,
								 CL_MAP_READ,
								 0, perImgHistogramSize*sizeof(cl_uchar)*GLOBAL_HISTOGRAM_FIFO_SIZE));


  // Init buffers used for best per-node feature/threshold pair learning
  /** 
   * \todo how to find a proper value for per-thread feature/threshold pairs and parallely-learnt
   *       node's best feature/threshold pair? At least avoid hard-coded values and parameterize them.
   * \todo check if per-thread pairs number is a multiple of total per-node pairs
  */
  size_t maxFrontierSize = (endDepth>2) ? (2<<(endDepth-3)) : 1;
  size_t perNodeHistogramSize = nClasses*params.nFeatures*params.nThresholds;
  unsigned int perThreadFeatThrPairs = PER_THREAD_FEAT_THR_PAIRS;
  unsigned int parLearntNodes = (maxFrontierSize>PARALLEL_LEARNT_NODES) ? 
    PARALLEL_LEARNT_NODES : maxFrontierSize;
  size_t learnBuffsSize = parLearntNodes*(params.nFeatures*params.nThresholds)/perThreadFeatThrPairs;
  m_clHistogramBuff = cl::Buffer(m_clContext,
				 CL_MEM_READ_ONLY,
				 parLearntNodes*perNodeHistogramSize*sizeof(cl_uint));
  m_clBestFeaturesBuff = cl::Buffer(m_clContext,
				    CL_MEM_WRITE_ONLY,
				    learnBuffsSize*sizeof(cl_uint));
  m_clBestThresholdsBuff = cl::Buffer(m_clContext,
				      CL_MEM_WRITE_ONLY,
				      learnBuffsSize*sizeof(cl_uint));
  m_clBestEntropiesBuff = cl::Buffer(m_clContext,
				     CL_MEM_WRITE_ONLY,
				     learnBuffsSize*sizeof(cl_float));
  m_clPerClassTotSamplesBuff = cl::Buffer(m_clContext,
					  CL_MEM_READ_ONLY,
					  parLearntNodes*nClasses*sizeof(cl_uint));
				    
  // Set kernel arguments that do not change between calls:
  // - prediction
  // Hack: use the labels image as mask, i.e. assume pixels with non-zero label
  //       are marked as foreground pixels
  //m_clPredictKern.setArg(0, m_clTsImg);
  //m_clPredictKern.setArg(1, m_clTsLabelsImg);
  m_clPredictKern.setArg(2, nChannels);
  m_clPredictKern.setArg(5, m_clTreeLeftChildBuff);
  m_clPredictKern.setArg(6, m_clTreeFeaturesBuff);
  m_clPredictKern.setArg(7, FeatDim);
  m_clPredictKern.setArg(8, m_clTreeThrsBuff);
  m_clPredictKern.setArg(9, m_clTreePosteriorsBuff);
  //m_clPredictKern.setArg(10, m_clTsNodesIDImg);
  //m_clPredictKern.setArg(11, m_clPredictImg);
  m_clPredictKern.setArg(12, cl::Local(sizeof(FeatType)*WG_WIDTH*WG_HEIGHT*FeatDim));

  // - per-image histogram update
  //m_clPerImgHistKern.setArg(0, m_clTsImg);
  m_clPerImgHistKern.setArg(1, nChannels);
  //m_clPerImgHistKern.setArg(4, m_clTsLabelsImg);
  //m_clPerImgHistKern.setArg(5, m_clTsNodesIDImg);
  //m_clPerImgHistKern.setArg(6, m_clTsSamplesBuff);
  m_clPerImgHistKern.setArg(8, FeatDim);
  m_clPerImgHistKern.setArg(9, m_clFeatLowBoundsBuff);
  m_clPerImgHistKern.setArg(10, m_clFeatUpBoundsBuff);
  m_clPerImgHistKern.setArg(11, params.nThresholds);
  m_clPerImgHistKern.setArg(12, params.thrLowBound);
  m_clPerImgHistKern.setArg(13, params.thrUpBound);
  //m_clPerImgHistKern.setArg(14, m_clPerImgHistBuff);
  m_clPerImgHistKern.setArg(15, tree.getID());
  m_clPerImgHistKern.setArg(18, m_clTreeLeftChildBuff);
  m_clPerImgHistKern.setArg(19, m_clTreePosteriorsBuff);
  m_clPerImgHistKern.setArg(20, cl::Local(sizeof(FeatType)*8));
  m_clPerImgHistKern.setArg(21, cl::Local(sizeof(FeatType)*WG_WIDTH*WG_HEIGHT*FeatDim));

  // - node's best feature/threshold learning
  m_clLearnBestFeatKern.setArg(0, m_clHistogramBuff);
  m_clLearnBestFeatKern.setArg(1, m_clPerClassTotSamplesBuff);
  m_clLearnBestFeatKern.setArg(2, params.nFeatures);
  m_clLearnBestFeatKern.setArg(3, params.nThresholds);
  m_clLearnBestFeatKern.setArg(4, nClasses);
  m_clLearnBestFeatKern.setArg(5, perThreadFeatThrPairs);
  m_clLearnBestFeatKern.setArg(6, m_clBestFeaturesBuff);
  m_clLearnBestFeatKern.setArg(7, m_clBestThresholdsBuff);
  m_clLearnBestFeatKern.setArg(8, m_clBestEntropiesBuff);


  // Init corresponding host buffers
  /** \todo use mapping/unmapping to avoid device/host copy */
  //m_tsNodesIDImg = new int[m_maxTsImgWidth*m_maxTsImgHeight*GLOBAL_HISTOGRAM_FIFO_SIZE];
  //m_perImgHist = new unsigned char[perImgHistogramSize*GLOBAL_HISTOGRAM_FIFO_SIZE];
  m_bestFeatures = new unsigned int[learnBuffsSize];
  m_bestThresholds = new unsigned int[learnBuffsSize];
  m_bestEntropies = new float[learnBuffsSize];

  // Done with OpenCL initialization


  // Init the global histogram:
  // define the global histogram as a vector of per-node histograms. The total size of
  // the global histogram (defined as number of per-node histograms simultaneously kept)
  // is limited by the smaller between maxFrontierSize and
  // GLOBAL_HISTOGRAM_MAX_SIZE/perNodeHistogramSize
  m_histogramSize = std::min(maxFrontierSize,
			     (size_t)floorl((double)GLOBAL_HISTOGRAM_MAX_SIZE/(perNodeHistogramSize*sizeof(unsigned int))));
  m_histogram = new unsigned int*[m_histogramSize];
  for (int i=0; i<m_histogramSize; i++) m_histogram[i] = new unsigned int[perNodeHistogramSize];


  // Buffer used to track to-train nodes for each depth
  m_frontier = new int[maxFrontierSize];


  // Note: the histogram for the root node is equal to the training set priors
  if (startDepth==1)
  {
    const TreeNode<FeatType, FeatDim> &rootNode = tree.getNode(0); 
    std::copy(trainingSet.getPriors(), trainingSet.getPriors()+nClasses, rootNode.m_posterior);
  }

  delete []tmpFeatUpBounds;
  delete []tmpFeatLowBounds;

  // Done
}
Code Example #22
TrainingSetFile::TSFResult TrainingSetFile::fromFile(QFile &file)
{
	QString
			version,
			text;

	QStringRef name;

	QXmlStreamReader tsReadXML;

	QXmlStreamReader::TokenType tt;
	QStringList textElements;
	QXmlStreamAttributes attributes;

	TrainingSetFile *retTSF = new TrainingSetFile();
	TSFResult res = {retTSF, true, NoError, "", 0};

	TrainingSet *ts = retTSF->getTrainingSet();

	int
			lastPatternIndex = 0,
			sTextElements,
			pSize = 0,
			iSize = 0,
			tSize = 0;

	Normalization
			*inor = new Normalization(),
			*tnor = new Normalization();

	vector<vector<double> >
			inputs,
			targets;

	DataRepresentation
			*idr = ts->getInputsDataRepresentation(),
			*tdr = ts->getTargetsDataRepresentation();

	if(file.open(QIODevice::ReadOnly)){
		tsReadXML.setDevice(&file);
		while (!tsReadXML.atEnd()) {
			tt = tsReadXML.readNext();

			if(tsReadXML.hasError()){
				file.close();
				return {retTSF, false, toTSFError(tsReadXML.error()), tsReadXML.errorString(), tsReadXML.lineNumber()};
			}

			if(tt == QXmlStreamReader::StartDocument){
				continue;
			}else if(tt == QXmlStreamReader::StartElement){
				name = tsReadXML.name();
				if(name == STR_TRAININGSET){
					attributes = tsReadXML.attributes();
					if(attributes.hasAttribute(STR_PATTERNSIZE) &&
					   attributes.hasAttribute(STR_INPUTSSIZE) &&
					   attributes.hasAttribute(STR_TARGETSSIZE))
					{
						pSize = attributes.value(STR_PATTERNSIZE).toInt();
						iSize = attributes.value(STR_INPUTSSIZE).toInt();
						tSize = attributes.value(STR_TARGETSSIZE).toInt();

						inputs = vector<vector<double> >(pSize, vector<double>(iSize, 0));
						targets = vector<vector<double> >(pSize, vector<double>(tSize, 0));
					}else{
						file.close();
						return {
							retTSF, false, NotWellFormedError, "NotWellFormedError: Missing attributes (" + STR_PATTERNSIZE + ", " + STR_INPUTSSIZE + ", " + STR_TARGETSSIZE + ") on tag " + STR_TRAININGSET, tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_PROPERTIES){
					attributes = tsReadXML.attributes();
					if(attributes.hasAttribute(STR_VERSION)){
						version = attributes.value(STR_VERSION).toString();
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Missing attributes (" + STR_VERSION + ") on tag " + STR_PROPERTIES, tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_INPUTSDATAREPRESENTATION){
					attributes = tsReadXML.attributes();
					if(attributes.hasAttribute(STR_NAME) &&
					   attributes.hasAttribute(STR_WIDTH) &&
					   attributes.hasAttribute(STR_HEIGHT) &&
					   attributes.hasAttribute(STR_FORMAT))
					{
						idr->setType(drFromStrToInt(attributes.value(STR_NAME).toString()));
						idr->setWidth(attributes.value(STR_WIDTH).toInt());
						idr->setHeight(attributes.value(STR_HEIGHT).toInt());
						idr->setImageFormat(fromStrToImgFormat(attributes.value(STR_FORMAT).toString()));
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Missing attributes (" + STR_NAME + ", " + STR_WIDTH + ", " + STR_HEIGHT + ", " + STR_FORMAT + ") on tag " + STR_INPUTSDATAREPRESENTATION, tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_TARGETSDATAREPRESENTATION){
					attributes = tsReadXML.attributes();
					if(attributes.hasAttribute(STR_NAME) &&
					   attributes.hasAttribute(STR_WIDTH) &&
					   attributes.hasAttribute(STR_HEIGHT) &&
					   attributes.hasAttribute(STR_FORMAT))
					{
						tdr->setType(drFromStrToInt(attributes.value(STR_NAME).toString()));
						tdr->setWidth(attributes.value(STR_WIDTH).toInt());
						tdr->setHeight(attributes.value(STR_HEIGHT).toInt());
						tdr->setImageFormat(fromStrToImgFormat(attributes.value(STR_FORMAT).toString()));
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Missing attributes (" + STR_NAME + ", " + STR_WIDTH + ", " + STR_HEIGHT + ", " + STR_FORMAT + ") on tag " + STR_TARGETSDATAREPRESENTATION, tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_INPUTSNORMALIZATION){
					attributes = tsReadXML.attributes();
					if(attributes.hasAttribute(STR_TYPE) &&
					   attributes.hasAttribute(STR_MAXVALUE) &&
					   attributes.hasAttribute(STR_MINVALUE) &&
					   attributes.hasAttribute(STR_THRESHOLD) &&
					   attributes.hasAttribute(STR_AMPLITUDE) &&
					   attributes.hasAttribute(STR_ELONGATION))
					{
						inor->setType(normFromStrToInt(attributes.value(STR_TYPE).toString()));
						inor->setMaxValue(attributes.value(STR_MAXVALUE).toDouble());
						inor->setMinValue(attributes.value(STR_MINVALUE).toDouble());
						inor->setThreshold(attributes.value(STR_THRESHOLD).toDouble());
						inor->setAmplitude(attributes.value(STR_AMPLITUDE).toDouble());
						inor->setElongation(attributes.value(STR_ELONGATION).toDouble());
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Missing attributes (" + STR_TYPE + ", " + STR_MAXVALUE + ", " + STR_MINVALUE + ", " + STR_THRESHOLD + ", " + STR_ELONGATION + ") on tag " + STR_INPUTSNORMALIZATION, tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_TARGETSNORMALIZATION){
					attributes = tsReadXML.attributes();
					if(attributes.hasAttribute(STR_TYPE) &&
					   attributes.hasAttribute(STR_MAXVALUE) &&
					   attributes.hasAttribute(STR_MINVALUE) &&
					   attributes.hasAttribute(STR_THRESHOLD) &&
					   attributes.hasAttribute(STR_AMPLITUDE) &&
					   attributes.hasAttribute(STR_ELONGATION))
					{
						tnor->setType(normFromStrToInt(attributes.value(STR_TYPE).toString()));
						tnor->setMaxValue(attributes.value(STR_MAXVALUE).toDouble());
						tnor->setMinValue(attributes.value(STR_MINVALUE).toDouble());
						tnor->setThreshold(attributes.value(STR_THRESHOLD).toDouble());
						tnor->setAmplitude(attributes.value(STR_AMPLITUDE).toDouble());
						tnor->setElongation(attributes.value(STR_ELONGATION).toDouble());
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Missing attributes (" + STR_TYPE + ", " + STR_MAXVALUE + ", " + STR_MINVALUE + ", " + STR_THRESHOLD + ", " + STR_ELONGATION + ") on tag " + STR_TARGETSNORMALIZATION, tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_PATTERN){
					attributes = tsReadXML.attributes();
					if(attributes.hasAttribute(STR_INDEX))
					{
						lastPatternIndex = attributes.value(STR_INDEX).toInt();
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Missing attributes (" + STR_INDEX + ") on tag " + STR_PATTERN, tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_INPUTS){
					text = tsReadXML.readElementText(QXmlStreamReader::SkipChildElements);
					textElements = text.split(STR_SEPARATOR, QString::KeepEmptyParts, Qt::CaseInsensitive);
					sTextElements = textElements.size();
					if(sTextElements == iSize){
						for(int i = 0; i < sTextElements; i++){
							inputs[lastPatternIndex][i] = textElements[i].toDouble();
						}
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Incongruence between reported input size with found inputs elements", tsReadXML.lineNumber()
						};
					}
				}else if(name == STR_TARGETS){
					text = tsReadXML.readElementText(QXmlStreamReader::SkipChildElements);
					textElements = text.split(STR_SEPARATOR, QString::KeepEmptyParts, Qt::CaseInsensitive);
					sTextElements = textElements.size();
					if(sTextElements == tSize){
						for(int t = 0; t < sTextElements; t++){
							targets[lastPatternIndex][t] = textElements[t].toDouble();
						}
					}else{
						file.close();
						return
						{
							retTSF, false, NotWellFormedError, "NotWellFormedError: Incongruence between reported target size with found target elements", tsReadXML.lineNumber()
						};
					}
				}
			}
		}

		retTSF->setFileName(file.fileName());
		res.file = retTSF;

		ts->setPatternCount(pSize);
		ts->setInputs(inputs, iSize);
		ts->setTargets(targets, tSize);
		ts->setInputsNormalization(inor);
		ts->setTargetsNormalization(tnor);
		ts->setInputsDataRepresentation(idr);
		ts->setTargetsDataRepresentation(tdr);

		res.sucess = true;
		res.errnum = toTSFError(QXmlStreamReader::NoError);
		res.errormsg = "";
		res.line = -1;

		file.close();
		return res;
	}else{
		file.close();
		return
		{
			retTSF, false, toTSFError(file.error()), file.errorString(), -1
		};
	}
}
Code example #23
0
File: FaceRecognizer.cpp  Project: el-bart/TIGER
FaceRecognizer::FaceRecognizer(const TrainingSet& set, const double avgThRangeScale, const double maxThreshold):
  imgNorm_( set.normalizer() ),
  faceRecognizer_( makeFaceRecognizer() )
{
  if( set.entries().size() < 2 )
    throw Util::Exception{ UTIL_LOCSTRM << "training samples set needs to have at least 2 elements" };

  int                  nextFreeLabel = 0;
  std::vector<cv::Mat> faces;
  std::vector<int>     labels;
  std::vector<cv::Mat> testSet;
  // prepare space for the destination data
  faces.reserve( set.samples() );
  labels.reserve( set.samples() );
  labMap_.reserve( set.samples() );
  testSet.reserve( set.samples() );

  // prepare data in the format used by the learning algorithm
  for( const auto& e: set.entries() )
  {
    const std::string& name = e.first;
    const cv::Mat&     face = e.second;
    // find/create id to assign
    int id = -1;
    // initially this vector is ordered by string names
    const LabelMap::value_type searchValue(-1/*whatever*/, name);
    const auto                 swo = [](const LabelMap::value_type& lhs, const LabelMap::value_type& rhs) -> bool { return lhs.second < rhs.second; };
    const auto                 it  = std::lower_bound( labMap_.begin(), labMap_.end(), searchValue, swo );
    if( it==labMap_.end() || it->second!=name )
    {
      // assign new label
      id = nextFreeLabel;
      ++nextFreeLabel;
      labMap_.insert( it, LabelMap::value_type{id, name} );
      // keep the first image from every class as a test sample; it is not added to the training data
      testSet.push_back(face);
      continue;
    }
    else
    {
      // use already assigned id
      assert( it->second==name );
      id = it->first;
    }
    // add to the containers
    faces.push_back(face);
    labels.push_back(id);
  }

  // learn data
  assert( faceRecognizer_.get() != nullptr );
  assert( faces.size() == labels.size() );
  //for(size_t i=0; i<faces.size(); ++i)
  faceRecognizer_->train(faces, labels);

  // set threshold according to what has been learned
  double thMin = 99999999999999;
  double thMax = 0;
  for(const auto& face: testSet)
  {
    int    label = -1;
    double dist  = -1;
    faceRecognizer_->predict(face, label, dist);
    if(dist<thMin)
      thMin = dist;
    if(dist>thMax)
      thMax = dist;
  }
  const double diff = thMax - thMin;
  threshold_ = thMin + diff * avgThRangeScale;
  if( threshold_ > maxThreshold )
    threshold_ = maxThreshold;

  // final version needs to be sorted by int ids, for easier search
  {
    const auto swo = [](const LabelMap::value_type& lhs, const LabelMap::value_type& rhs) -> bool { return lhs.first < rhs.first; };
    std::sort( labMap_.begin(), labMap_.end(), swo );
  }
  assert( labMap_.size() == static_cast<size_t>(nextFreeLabel) );
}
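
The threshold logic at the end of the constructor interpolates between the smallest and largest prediction distances observed on the held-out test images, then caps the result at maxThreshold. A minimal, self-contained sketch of that arithmetic, with made-up numbers purely for illustration (in the constructor above, thMin/thMax come from faceRecognizer_->predict() on the test set):

#include <algorithm>
#include <cassert>

int main()
{
  // Illustrative values only.
  const double thMin           = 40.0;  // smallest distance seen on the test images
  const double thMax           = 90.0;  // largest distance seen on the test images
  const double avgThRangeScale = 0.5;   // caller-supplied scale, typically in [0, 1]
  const double maxThreshold    = 60.0;  // caller-supplied hard cap

  double threshold = thMin + (thMax - thMin) * avgThRangeScale;  // 40 + 50 * 0.5 = 65
  threshold = std::min(threshold, maxThreshold);                 // capped to 60

  assert(threshold == 60.0);
  return 0;
}
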
Code example #24
0
File: NEURAL.CPP  Project: abishekahluwaila/read
int main (
   int argc ,    // Number of command line arguments (includes prog name)
   char *argv[]  // Arguments (prog name is argv[0])
   )

{

/*
   Declarations of local variables
*/

/*
   User's command control line related variables are here.
   Control_file_number and control_files permit nesting of 'CONTROL' commands.
   If control_file_number equals -1, control commands are read from stdin.
   Otherwise they are read from that file in FILE *control_files.
   Up to MAX_CONTROL_FILES can be stacked.
*/

   int control_file_number = -1 ;           // Stack pointer for control files
   FILE *control_files[MAX_CONTROL_FILES] ; // This is the stack

   char *control_line ;    // User's commands here
   char *command, *rest ;  // Pointers to its command and parameter parts
   int n_command, n_rest ; // Lengths of those parts

/*
   These are network parameters which may be set by the user via commands.
   They are initialized to defaults which indicate that the user has not
   yet set them.  As they are set, their current values are placed here.
   When learning is done for a network, their values are copied from here
   into the network object.  When a network is read, the object's values
   are copied from it to here.  Otherwise, these variables are not used;
   the values in the network object itself are used.  The only purpose of
   these variables is to keep track of current values.
*/

   int net_model = -1 ;     // Network model (see NETMOD_? in CONST.H)
   int out_model = -1 ;     // Output model (see OUTMOD_? in CONST.H)
   int n_inputs = -1 ;      // Number of input neurons
   int n_outputs = -1 ;     // Number of output neurons
   int n_hidden1 = -1 ;     // Number of hidden layer one neurons
   int n_hidden2 = -1 ;     // Ditto layer 2 (0 if just one hidden layer)


   TrainingSet *tset = NULL ;            // Training set here
   Network *network = NULL ;             // Network here
   struct LearnParams learn_params ;     // General learning parameters
   struct AnnealParams anneal_params ;   // Simulated annealing parameters
   struct GenInitParams geninit_params ; // Genetic initialization parameters
   struct KohParams koh_params ;         // Kohonen parameters

   int classif_output = -1 ;  // Current class (0=reject) for classif training
   char out_file[80] = "" ;   // File for EXECUTE output
   double threshold = 0.0 ;  // CLASSIFY confusion reject cutoff (set via CONFUSION THRESHOLD)

/*
   Miscellaneous variables
*/

   int i, n, m ;
   double p ;
   char *msg ;
   FILE *fp ;

/*
--------------------------------------------------------------------------------

   Program starts here.

   Verify that a careless user didn't fail to set the integer size
   correctly when compiling.

--------------------------------------------------------------------------------
*/

#if VERSION_16_BIT
   if (sizeof(int) > 2) {
      printf ( "\nRecompile with VERSION_16_BIT set to 0 in CONST.H" ) ;
      exit ( 1 ) ;
      }
#else
   if (sizeof(int) < 4) {
      printf ( "\nRecompile with VERSION_16_BIT set to 1 in CONST.H" ) ;
      exit ( 1 ) ;
      }
#endif

printf ( "\nNEURAL - Program to train and test neural networks" ) ;
printf("\nCopyright (c) 1993 by Academic Press, Inc.");
printf("\nAll rights reserved.  Permission is hereby granted, until further notice,");
printf("\nto make copies of this diskette, which are not for resale, provided these");
printf("\ncopies are made from this master diskette only, and provided that the");
printf("\nfollowing copyright notice appears on the diskette label:");
printf("\n(c) 1993 by Academic Press, Inc.");
printf("\nExcept as previously stated, no part of the computer program embodied in");
printf("\nthis diskette may be reproduced or transmitted in any form or by any means,");
printf("\nelectronic or mechanical, including input into storage in any information");
printf("\nsystem for resale, without permission in writing from the publisher.");
printf("\nProduced in the United States of America.");
printf("\nISBN 0-12-479041-0");

/*
   Process command line parameters
*/

   mem_name[0] = 0 ;  // Default is no memory allocation file

   for (i=1 ; i<argc ; i++) {  // Process all command line args
      str_to_upr ( argv[i] ) ; // Easier if all upper case

      if (! strcmp ( argv[i] , "/DEBUG" )) {
         sscanf ( argv[++i] , "%s" , mem_name ) ;
         if ((strlen ( mem_name ) > 1)  ||  ! isalpha ( mem_name[0] )) {
            printf ( "\nIllegal DEBUG drive (%s); must be 1 letter." ) ;
            exit ( 1 ) ;
            }
         continue ;
         }

      printf ( "\nIllegal command line parameter (%s)", argv[i] ) ;
      exit ( 1 ) ;
      }

/*
   Initialize memory allocation debugging
*/

   if (strlen ( mem_name )) {
      strcat ( mem_name , ":mem.log" ) ;
      fp = fopen ( mem_name , "wt" ) ;
      if (fp == NULL) {
         printf ( "\nCannot open debugging file %s", mem_name ) ;
         exit ( 1 ) ;
         }
      fclose ( fp ) ;
      mem_log = 1 ;
      }
   else 
      mem_log = 0 ;

   mem_used = 0 ;

/*
   Initialize defaults
*/

   learn_params.init = -1 ;
   learn_params.quit_err = 0.0 ;
   learn_params.retries = 32767 ;

   anneal_params.temps0 = 3 ;
   anneal_params.temps = 4 ;
   anneal_params.iters0 = 50 ;
   anneal_params.iters = 20 ;
   anneal_params.setback0 = 50 ;
   anneal_params.setback = 20 ;
   anneal_params.start0 = 3.0 ;
   anneal_params.start = 4.0 ;
   anneal_params.stop0 = 1.0 ;
   anneal_params.stop = 0.02 ;

   geninit_params.pool = 50 ;
   geninit_params.gens = 3 ;
   geninit_params.climb = 0 ;
   geninit_params.overinit = 1.5 ;
   geninit_params.pcross = 0.8 ;
   geninit_params.pmutate = 0.0001 ;

   koh_params.normalization = 0 ;  // 0=multiplicative, 1=Z 
   koh_params.learn_method = 1 ;   // 0=additive, 1=subtractive
   koh_params.rate = 0.4 ;         // learning rate
   koh_params.reduction = 0.99 ;   // learning rate reduction

   learn_params.ap = &anneal_params ;
   learn_params.gp = &geninit_params ;
   learn_params.kp = &koh_params ;

   act_func_init () ; // Initialize interpolation table for activation function

   MEMTEXT ( "NEURAL: control_line, msg" ) ;
   if (((control_line = (char *) MALLOC ( CONTROL_LINE_LENGTH+1 )) == NULL)
    || ((msg = (char *) MALLOC ( CONTROL_LINE_LENGTH+1 )) == NULL)) {
      printf ( "\nInsufficient memory" ) ;
      exit ( 1 ) ;
      }

/*
   Main loop processes all commands
*/

   for (;;) {

      get_control_line ( control_line , &control_file_number, control_files ) ;

      split_control_line ( control_line , &command , &n_command ,
                           &rest , &n_rest ) ;

      if (! n_command) {
         if (n_rest) {
            sprintf ( msg , "No colon after command: %s", rest ) ;
            error_message ( msg ) ;
            }
         continue ;
         }

      sprintf ( msg , "%s : %s", command, rest ) ;
      normal_message ( msg ) ;

/*
   Act on the command
*/

      if (! strcmp ( command , "QUIT" ))
         break ;

      if (! strcmp ( command , "CONTROL" )) {
         stack_control_file ( rest , &control_file_number , control_files ) ;
         continue ;
         }

      if (! strcmp ( command , "NETWORK MODEL" )) {
         if (! strcmp ( rest , "LAYER" ))
            n = NETMOD_LAYER ;
         else if (! strcmp ( rest , "KOHONEN" ))
            n = NETMOD_KOH ;
         else {
            sprintf ( msg , "Illegal NETWORK MODEL: %s", rest ) ;
            error_message ( msg ) ;
            continue ;
            }
         if (net_model == n)
            continue ;
         if (ok_to_clear_weights( &network )) {
            net_model = n ;
            learn_params.init = -1 ;
            }
         else
            warning_message ( "Command aborted" ) ;
         continue ;
         }

      if (! strcmp ( command , "OUTPUT MODEL" )) {
         if (! strcmp ( rest , "CLASSIFY" ))
            n = OUTMOD_CLASSIFY ;
         else if (! strcmp ( rest , "AUTO" ))
            n = OUTMOD_AUTO ;
         else if (! strcmp ( rest , "GENERAL" ))
            n = OUTMOD_GENERAL ;
         else {
            sprintf ( msg , "Illegal OUTPUT MODEL: %s", rest ) ;
            error_message ( msg ) ;
            continue ;
            }
         if (out_model == n)
            continue ;
         if ((ok_to_clear_tset( &tset )) && (ok_to_clear_weights( &network)))
            out_model = n ;
         else
            warning_message ( "Command aborted" ) ;
         continue ;
         }

      if (! strcmp ( command , "N INPUTS" )) {
         m = sscanf ( rest , "%d" , &n ) ;
         if ((m <= 0)  ||  (n <= 0)  ||  (n > MAX_INPUTS)) {
            sprintf ( msg , "Illegal N INPUTS: %s", rest ) ;
            error_message ( msg ) ;
            continue ;
            }
         if (n_inputs == n)
            continue ;
         if ((ok_to_clear_tset( &tset)) && (ok_to_clear_weights(&network)))
            n_inputs = n ;
         else
            warning_message ( "Command aborted" ) ;
         continue ;
         }

      if (! strcmp ( command , "N OUTPUTS" )) {
         m = sscanf ( rest , "%d" , &n ) ;
         if ((m <= 0)  ||  (n <= 0)  ||  (n > MAX_OUTPUTS)) {
            sprintf ( msg , "Illegal N OUTPUTS: %s", rest ) ;
            error_message ( msg ) ;
            continue ;
            }
         if (n_outputs == n)
            continue ;
         if ((ok_to_clear_tset( &tset)) && (ok_to_clear_weights(&network)))
            n_outputs = n ;
         else
            warning_message ( "Command aborted" ) ;
         continue ;
         }

      if (! strcmp ( command , "N HIDDEN1" )) {
         m = sscanf ( rest , "%d" , &n ) ;
         if ((m <= 0)  ||  (n < 0)  ||  (n > MAX_HIDDEN)) {
            sprintf ( msg , "Illegal N HIDDEN1: %s", rest ) ;
            error_message ( msg ) ;
            continue ;
            }
         if (n_hidden1 == n)
            continue ;
         if (ok_to_clear_weights( &network ))
            n_hidden1 = n ;
         else
            warning_message ( "Command aborted" ) ;
         continue ;
         }

      if (! strcmp ( command , "N HIDDEN2" )) {
         m = sscanf ( rest , "%d" , &n ) ;
         if ((m <= 0)  ||  (n < 0)  ||  (n > MAX_HIDDEN)) {
            sprintf ( msg , "Illegal N HIDDEN2: %s", rest ) ;
            error_message ( msg ) ;
            continue ;
            }
         if (n  &&  ! n_hidden1) {
            error_message ( "N HIDDEN2 must be 0 if N HIDDEN1 IS 0." ) ;
            continue ;
            }
         if (n_hidden2 == n)
            continue ;
         if (ok_to_clear_weights( &network ))
            n_hidden2 = n ;
         else
            warning_message ( "Command aborted" ) ;
         continue ;
         }

      if (! strcmp ( command , "TRAIN" )) {
         if ((out_model == OUTMOD_AUTO)  &&  (n_outputs != n_inputs)) {
            warning_message ( "Setting N OUTPUTS = N INPUTS" ) ;
            n_outputs = n_inputs ;
            }
         if (out_model <= 0)
            error_message ( "TRAIN used before OUTPUT MODEL set." ) ;
         else if (n_inputs <= 0)
            error_message ( "TRAIN used before N INPUTS set." ) ;
         else if (n_outputs <= 0)
            error_message ( "TRAIN used before N OUTPUTS set." ) ;
         else if ((net_model != NETMOD_KOH) && (out_model == OUTMOD_CLASSIFY)
                  &&  (classif_output < 0))
            error_message( "CLASSIFY output mode but CLASSIFY OUTPUT not set.");
         else if ((net_model == NETMOD_KOH)  &&  (out_model != OUTMOD_CLASSIFY))
            error_message( "KOHONEN network requires CLASSIFY output mode.");
         else {
            if (tset == NULL) {
               MEMTEXT ( "NEURAL: new tset" ) ;
               tset = new TrainingSet ( out_model , n_inputs , n_outputs ) ;
               }
            tset->train ( rest , classif_output ) ;
            }
         continue ;
         }

      if (check_anneal ( command , rest , &anneal_params ))
         continue ;

      if (check_genetic ( command , rest , &geninit_params ))
         continue ;

      if (check_kohonen ( command , rest , &koh_params , &network ))
         continue ;

      if (check_learn_params ( command , rest , &learn_params , net_model ))
         continue ;

      if (! strcmp ( command , "LEARN" )) {
         if ((tset == NULL)  ||  (tset->ntrain == 0)) {
            error_message ( "Cannot LEARN; No training set exists." ) ;
            continue ;
            }
         if ((net_model == NETMOD_KOH)  &&  (out_model != OUTMOD_CLASSIFY)) {
            error_message( "KOHONEN network requires CLASSIFY output mode.");
            continue ;
            }
         if (learn_params.init < 0) {
            error_message( "Initialization method not set.");
            continue ;
            }
         if (network == NULL) {
            if (net_model == NETMOD_LAYER) {
               if (n_hidden1 < 0) {
                  error_message ( "LEARN used before N HIDDEN1 set." ) ;
                  continue ;
                  }
               else if (n_hidden2 < 0) {
                  error_message ( "LEARN used before N HIDDEN2 set." ) ;
                  continue ;
                  }
               else {
                  MEMTEXT ( "NEURAL: new LayerNet" ) ;
                  network = new LayerNet ( out_model , n_inputs , n_hidden1 ,
                                           n_hidden2 , n_outputs , 1 , 1 ) ;
                  }
               }
            else if (net_model == NETMOD_KOH) {
               MEMTEXT ( "NEURAL: new KohNet" ) ;
               network = new KohNet ( n_inputs , n_outputs ,
                                      &koh_params , 1 , 1 ) ;
               }
            }
         if ((network == NULL)  ||  (! network->ok)) {  // Malloc failure?
            memory_message ( "to create network." ) ;
            if (network != NULL) {
               delete network ;
               network = NULL ;
               }
            continue ;
            }
         network->learn ( tset , &learn_params ) ;
         if (network->neterr > 0.999999) {  // Indicates massive failure
            MEMTEXT ( "NEURAL: learn failure delete network" ) ;
            delete network ;
            network = NULL ;
            }
         else {
            sprintf ( msg , "Final error = %.4lf%% of max possible",
                      100.0 * network->neterr ) ;
            normal_message ( msg ) ;
            }
         continue ;
         }

      if (! strcmp ( command , "SAVE WEIGHTS" )) {
         if (network == NULL)
            error_message ( "There are no learned weights to save." ) ;
         else
            wt_save ( network , net_model , 0 , rest ) ;
         continue ;
         }

      if (! strcmp ( command , "RESTORE WEIGHTS" )) {
         if (network != NULL) {
            MEMTEXT ( "NEURAL: delete network for restore" ) ;
            delete network ;
            network = NULL ;
            }
         network = wt_restore ( rest , &net_model ) ;
         if (network == NULL)
            continue ;
         if (tset != NULL) {
            if ((tset->nin != network->nin)
             || (tset->nout != network->nout)
             || (tset->outmod != network->outmod)) {
               error_message ( "Network conflicts with existing training set.");
               continue ;
               }
            }
         out_model = network->outmod ;
         n_inputs = network->nin ;
         n_outputs = network->nout ;
         if (net_model == NETMOD_LAYER) {
            n_hidden1 = ((LayerNet*) network)->nhid1 ;
            n_hidden2 = ((LayerNet*) network)->nhid2 ;
            }
         if (net_model == NETMOD_KOH)
            koh_params.normalization = ((KohNet *) network)->normalization ;
         learn_params.init = -1 ;
         continue ;
         }

      if (! strcmp ( command , "CLEAR TRAINING" )) {
         if (tset != NULL) {
            MEMTEXT ( "NEURAL: delete tset" ) ;
            delete tset ;
            tset = NULL ;
            }
         continue ;
         }

      if (! strcmp ( command , "CLEAR WEIGHTS" )) {
         if (network != NULL) {
            MEMTEXT ( "NEURAL: delete network" ) ;
            delete network ;
            network = NULL ;
            }
         continue ;
         }

      if (! strcmp ( command , "CLASSIFY OUTPUT" )) {
         if (net_model == NETMOD_KOH) {
            error_message ( "Cannot specify output for KOHONEN model." ) ;
            continue ;
            }
         if (n_outputs < 0) {
            error_message ( "CLASSIFY OUTPUT used before N OUTPUTS set." ) ;
            continue ;
            }
         if (out_model != OUTMOD_CLASSIFY) {
            error_message
                  ( "CLASSIFY OUTPUT only valid when OUTPUT MODEL:CLASSIFY" ) ;
            continue ;
            }
         m = sscanf ( rest , "%d" , &n ) ;
         if ((m <= 0)  ||  (n < 0)) {
            sprintf ( msg , "Illegal CLASSIFY OUTPUT: %s", rest ) ;
            error_message ( msg ) ;
            }
         else if (n > n_outputs) {
            sprintf ( msg , "CLASSIFY OUTPUT (%d) exceeds N OUTPUTS (%d)",
                      n, n_outputs ) ;
            error_message ( msg ) ;
            }
         else
            classif_output = n ;
         continue ;
         }


      if (! strcmp ( command , "OUTPUT FILE" )) {
         strcpy ( out_file , rest ) ;
         continue ;
         }

      if (! strcmp ( command , "EXECUTE" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else
            network->execute_from_file ( rest , out_file ) ;
         continue ;
         }

      if (! strcmp ( command , "CLASSIFY" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else if (out_model != OUTMOD_CLASSIFY)
            error_message ( "CLASSIFY valid only in CLASSIFY output mode" ) ;
         else
            network->classify_from_file ( rest , threshold ) ;
         continue ;
         }

      if (! strcmp ( command , "RESET CONFUSION" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else
            network->reset_confusion () ;
         continue ;
         }

      if (! strcmp ( command , "CONFUSION THRESHOLD" )) {
         p = atof ( rest ) ;
         if ((p < 0.0)  ||  (p > 100.0)) {
            sprintf ( msg , "Illegal CONFUSION THRESHOLD: %s", rest ) ;
            error_message ( msg ) ;
            }
         else
            threshold = p / 100.0 ;
         continue ;
         }

      if (! strcmp ( command , "SHOW CONFUSION" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else if (out_model != OUTMOD_CLASSIFY)
            error_message ( "CONFUSION valid only in CLASSIFY output mode" ) ;
         else
            network->show_confusion () ;
         continue ;
         }

      if (! strcmp ( command , "SAVE CONFUSION" )) {
         if (network == NULL)
            error_message ( "There is no trained network" ) ;
         else if (out_model != OUTMOD_CLASSIFY)
            error_message ( "CONFUSION valid only in CLASSIFY output mode" ) ;
         else
            network->save_confusion ( rest ) ;
         continue ;
         }

      sprintf ( msg , "Unknown command: %s", command ) ;
      error_message ( msg ) ;

      } // Endless command loop

   MEMTEXT ( "NEURAL: control_line, msg" ) ;
   FREE ( control_line ) ;
   FREE ( msg ) ;
   MEMCLOSE () ;
   return 0 ;
}
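
The main loop above is driven entirely by textual commands of the form "COMMAND : parameter", read either from stdin or from files stacked with the CONTROL command. Purely as an illustrative sketch, a minimal classification session might look like the lines below; the .dat/.wts file names are invented, and the initialization-method and other learning-parameter commands (handled by check_learn_params, check_anneal, etc., whose code is not shown here) are omitted:

   OUTPUT MODEL : CLASSIFY
   NETWORK MODEL : LAYER
   N INPUTS : 10
   N OUTPUTS : 3
   N HIDDEN1 : 5
   N HIDDEN2 : 0
   CLASSIFY OUTPUT : 1
   TRAIN : class1.dat
   CLASSIFY OUTPUT : 2
   TRAIN : class2.dat
   LEARN :
   SAVE WEIGHTS : trained.wts
   QUIT :
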
Code example #25
0
File: trainingset.cpp  Project: marlncpe/INSYDE
TrainingSet::TrainingSet(const TrainingSet &ts, QObject *parent) :
	QObject(parent)
{
	initTS(ts.getInputs(), ts.getTargets());
}
Code example #26
0
File: SupeRes.cpp  Project: caomw/patchtable
void GeneraTrainSet()
{
	int num = LoadTrainPic();
	int traiNo = TRAINFILENAME.size();
	TrainingSet *trainSet = new TrainingSet();
	int keycount = 0, currentImgNo = 0;
	//Values(num);
	Key = cvCreateMat(num, 61, CV_32F);
	HighMean = new float[num];
	Valuse = new Point2d[num];
	//Values = cvCreateMat(num, 25, CV_32FC3);
	for (int i = 0; i < traiNo; i++)
	{
		//Mat srcImg = imread(TRAINFILENAME[i], 1);
		//Mat srcImg = imread("images\\input\\123.jpg",1);//CV_32S
		//Mat srcrgb = imread("images\\input\\123.jpg", CV_LOAD_IMAGE_GRAYSCALE);//CV_8U
		Mat srcImg = InputImg[i];
		Mat srColorImg= InputColorImg[i];
		int x=srColorImg.type();
		srcImg.convertTo(srcImg, CV_32FC1);
		srColorImg.convertTo(srColorImg,CV_32FC3);
		x=srColorImg.type();
		for (int row = 0; row < srcImg.rows; row++)
		{
			for (int col = 0; col < srcImg.cols; col++)
			{
				srcImg.at<float>(row, col) = srcImg.at<float>(row, col) / 255.0;
				srColorImg.at<Vec3f>(row, col)[0] = srColorImg.at<Vec3f>(row, col)[0] / 255.0;
				srColorImg.at<Vec3f>(row, col)[1] = srColorImg.at<Vec3f>(row, col)[1] / 255.0;
				srColorImg.at<Vec3f>(row, col)[2] = srColorImg.at<Vec3f>(row, col)[2] / 255.0;
			}
		}
		Mat lowResImg, highResImg, cubicImg;
		GeneraLHImg(lowResImg, highResImg, srcImg, cubicImg);
		Mat colorlowResImg, colorhighResImg, colorcubicImg;
		GeneraLHImg(colorlowResImg, colorhighResImg, srColorImg, colorcubicImg);
		char temps[50];
		sprintf(temps, "%d_lowresImg.png", i);
		string s(temps);
		imwrite(TEST + s, lowResImg*255.0);
		sprintf(temps, "%d_highImg.png", i);
		string c(temps);
		imwrite(TEST + c, highResImg*255.0);
		sprintf(temps, "%d_colorlowresImg.png", i);
		string sc(temps);
		imwrite(TEST + sc, colorlowResImg*255.0);
		sprintf(temps, "%d_colorhighresImg.png", i);
		string cs(temps);
		imwrite(TEST + cs, colorhighResImg*255.0);
		PatchGen *lowResPatches = new PatchGen(lowResImg, 7);
		lowResPatches->Initalize(CV_32F);
		PatchGen *higResPatches = new PatchGen(highResImg, 5);
		higResPatches->Initalize(CV_32F);

		//colorlowResImg.convertTo(colorlowResImg,CV_32FC3);
		PatchGen *colorlowResPatches = new PatchGen(colorlowResImg, 7);
		colorlowResPatches->Initalize(CV_32FC3);
		PatchGen *colorhigResPatches = new PatchGen(colorhighResImg, 5);
		colorhigResPatches->Initalize(CV_32FC3);

		InputHigh[i] = highResImg.clone();
		/*if (i == 4){
		int error = 1;
		}*/
		//trainSet->FileInfo(lowResPatches, higResPatches, ALPHA, lowResPatches->m_patch_rowNo, lowResPatches->m_patch_colNo, Key, Valuse, HighMean, keycount);
		trainSet->FileColorInfo(colorlowResPatches,colorhigResPatches,lowResPatches, higResPatches, ALPHA,COLORCONS, lowResPatches->m_patch_rowNo, lowResPatches->m_patch_colNo, Key, Valuse, HighMean, keycount);
		delete lowResPatches;
		delete higResPatches;
		delete colorlowResPatches;
		delete colorhigResPatches;
	}
	delete trainSet;
}
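
A side note on the normalization loops in GeneraTrainSet: cv::Mat::convertTo already accepts a scale factor, so converting to float and dividing every pixel by 255.0 can be folded into a single call. A small sketch, assuming OpenCV is available and the source images are 8-bit:

#include <opencv2/core.hpp>

// Convert an 8-bit image (grayscale or 3-channel) to float in [0, 1] in one
// step; convertTo applies dst = src * alpha + beta element-wise and keeps the
// channel count of the input.
static void toUnitFloat(const cv::Mat& src8u, cv::Mat& dst32f)
{
	src8u.convertTo(dst32f, CV_32F, 1.0 / 255.0);
}
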