Code Example #1
void NeuralNetwork::trainOn(arma::mat& input, const arma::mat& output, int numIterations, int iterationsBetweenReport)
{
    if (input.n_cols != static_cast<unsigned int>(m_numNeuronsOnLayer[0]) ||
            output.n_cols != static_cast<unsigned int>(m_numNeuronsOnLayer[m_numLayers - 1]))
        throw InvalidInputException("File's input / output length doesn't match the "
                                    "number of neurons on the input / output layer.");

    if (m_featureNormalization)
        normalizeFeatures(input);

    double prevCost = computeCost(input, output, m_theta);
    double crtCost = prevCost;
    for (int iteration = 0; iteration < numIterations; ++iteration)
    {
        if (iterationsBetweenReport &&
                (iteration % iterationsBetweenReport == 0 || iteration + 1 == numIterations))
            std::cout << "Iteration: " << iteration << " | Cost: " << crtCost << std::endl;
        if (crtCost > prevCost)
        {
            std::cout << "The cost is increasing. Choose a smaller learning rate." << std::endl;
            return;
        }
        backprop(input, output);
        prevCost = crtCost;
        crtCost = computeCost(input, output, m_theta);
    }
}
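
The normalizeFeatures(input) call above is not listed in the example. The sketch below shows one plausible implementation: a column-wise z-score normalization over an arma::mat. It is written as a free function that returns the statistics to the caller; the original is a NeuralNetwork member and presumably stores them internally, so the signature and the zero-variance guard are assumptions for illustration, not the original code.

#include <armadillo>

// Hypothetical sketch of column-wise z-score normalization, consistent with
// how normalizeFeatures(input) is used in Code Example #1.
void normalizeFeatures(arma::mat& input, arma::rowvec& mu, arma::rowvec& sigma)
{
    mu = arma::mean(input, 0);          // mean of each feature column
    sigma = arma::stddev(input, 0, 0);  // sample standard deviation per column
    sigma.replace(0.0, 1.0);            // guard against constant (zero-variance) features

    input.each_row() -= mu;             // center every sample
    input.each_row() /= sigma;          // scale to unit variance
}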
Code Example #2
File: segmentlist.cpp  Project: Svensational/tidy
void SegmentList::prepare() {
   foreach (Segment * const segment, *this) {
      segment->relativizePosition();
      segment->calculateSpatialFeatures();
      segment->calculateColorFeatures();
      segment->calculateContour();
   }
   normalizeFeatures();
   calculateFeatureVariances();
}
Code Example #3
void CFeatureOperatorAnd::getModifiedState(CStateCollection *stateCol, CState *featState)
{
	int featureOffset = 1;

	std::list<CStateModifier *>::iterator it = getStateModifiers()->begin();
	std::list<CState *>::iterator stateIt = states->begin();

	CState *stateBuf;

	for (unsigned int i = 0; i < getNumDiscreteStates(); i++)
	{
		featState->setDiscreteState(i, 0);
		featState->setContinuousState(i, 1.0);
	}

	// Combine the modifiers by mixed-radix encoding: featureOffset is the radix
	// weight of the current modifier, repetitions the number of consecutive
	// combined indices that share one of its features.
	int repetitions = getNumDiscreteStates();
	for (int j = 0; it != getStateModifiers()->end(); it++, stateIt++, j++)
	{
		repetitions /= (*it)->getNumDiscreteStates();
		stateBuf = NULL;
		if (stateCol->isMember(*it))
		{
			stateBuf = stateCol->getState(*it);
		}
		else
		{
			stateBuf = *stateIt;
			(*it)->getModifiedState(stateCol, stateBuf);
		}
		
		if (stateBuf->getStateProperties()->isType(FEATURESTATE))
		{
			// AND combination: sum the weighted discrete indices and multiply
			// the continuous activations of the individual feature states.
			for (unsigned int i = 0; i < getNumDiscreteStates(); i++)
			{
				unsigned int singleStateFeatureNum = (i / repetitions) % stateBuf->getNumDiscreteStates();
				featState->setDiscreteState(i, featState->getDiscreteState(i) + featureOffset * stateBuf->getDiscreteState(singleStateFeatureNum));
				featState->setContinuousState(i, featState->getContinuousState(i) * stateBuf->getContinuousState(singleStateFeatureNum));
			}
		}
		else
		{
			for (unsigned int i = 0; i < getNumDiscreteStates(); i++)
			{
				featState->setDiscreteState(i, featState->getDiscreteState(i) + featureOffset * stateBuf->getDiscreteState(0));				
			}
		}

		featureOffset = featureOffset * (*it)->getDiscreteStateSize();
	}
	normalizeFeatures(featState);
}
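
The normalizeFeatures(featState) call at the end of the example above (and the similar call in Code Example #5 below) is not listed here. A plausible sketch, using only the CState accessors that appear in these examples, rescales the continuous feature activations so they sum to one. Writing it as a free function and relying on getNumDiscreteStates() to give the number of feature slots are assumptions for illustration; the original is presumably a member of the feature-calculator base class.

// Hypothetical sketch: rescale the continuous activations of a feature state
// so that they sum to 1.
void normalizeFeatures(CState *featState)
{
	double sum = 0.0;
	for (unsigned int i = 0; i < featState->getNumDiscreteStates(); i++)
		sum += featState->getContinuousState(i);

	if (sum > 0.0)
	{
		for (unsigned int i = 0; i < featState->getNumDiscreteStates(); i++)
			featState->setContinuousState(i, featState->getContinuousState(i) / sum);
	}
}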
Code Example #4
/**
 * Function used to compute the features of the whole image
 * @return the sequence of vectors of features
 */
vector< VectorXd > FeatureExtractor::getFeatures(){
    int cols = im_bw.cols;
    vector< VectorXd > result;

    // Compute one feature vector per column window and convert it to a VectorXd
    for(int j = 0; j < cols; ++j)
    {
        vector<double> winFeatures = computeFeaturesPerWindow(j);
        result.push_back(convertToVectorXd(winFeatures));
    }

    return normalizeFeatures(result);
}
Code Example #5
void CFeatureOperatorOr::getModifiedState(CStateCollection *stateCol, CState *state)
{
	assert(bInit);

	std::list<CStateModifier *>::iterator it = getStateModifiers()->begin();
	std::list<CState *>::iterator stateIt = states->begin();

	CState *stateBuf;

	int i = 0;
	int numFeatures = 0;

	// OR combination: concatenate the features of all modifiers, offsetting
	// each modifier's discrete indices by the number of features seen so far
	// and weighting its activations by that modifier's feature factor.
	for (; it != getStateModifiers()->end(); it++, stateIt++)
	{
		if (stateCol->isMember(*it))
		{
			stateBuf = stateCol->getState(*it);
		}
		else
		{
			stateBuf = *stateIt;
			(*it)->getModifiedState(stateCol, stateBuf);
		}
		double featureStateFactor = (*this->featureFactors)[*it];
		if (stateBuf->getStateProperties()->isType(FEATURESTATE))
		{
			for (unsigned int j = 0; j < stateBuf->getNumDiscreteStates(); j++)
			{
				state->setDiscreteState(i, stateBuf->getDiscreteState(j) + numFeatures);
				state->setContinuousState(i, stateBuf->getContinuousState(j) * featureStateFactor);
				i ++;
			}
		}
		else
		{
			if (stateBuf->getStateProperties()->isType(DISCRETESTATE))
			{
				state->setDiscreteState(i, stateBuf->getDiscreteState(0) + numFeatures);
				state->setContinuousState(i, featureStateFactor);
				i ++;
			}
		}

		numFeatures += (*it)->getDiscreteStateSize();
	}
	normalizeFeatures(state);
}
Code Example #6
svmModel SMO::train()
{
    int passes = 0;
    int maxPasses = 25;
    int numChanged = 0;
    int examineAll = 1;
    alpha.resize(points.size(), 0);
    errorCache.resize(points.size(), 0);
    w.resize(points.size(), 0);
    threshold = 0;
    // Normalize the input points
    normalizeFeatures();
    // SMO outer loop:
    // Each pass alternates between a sweep over all points (examineAll = 1)
    // and a sweep over the non-boundary points only (examineAll = 0).
    while ((numChanged > 0 || examineAll) && (passes < maxPasses)) {
        numChanged = 0;
        if (examineAll) { 
            for (unsigned int i = 0; i < points.size(); i++)
            {
                if (plugin->isAborted() == true)
                {
                    plugin->progress.report("User Aborted", 0, ABORT, true);
                    return svmModel();
                }
                plugin->progress.report("Training SVM for class " + className, passes*100/maxPasses + (i+1)*100/maxPasses/points.size(), NORMAL);
                numChanged += examineExample (i);
            }
        }
        else { 
            for (unsigned int i = 0; i < points.size(); i++)
                if (alpha[i] != 0 && alpha[i] != C)
                {
                    if (plugin->isAborted() == true)
                    {
                        plugin->progress.report("User Aborted", 0, ABORT, true);
                        return svmModel();
                    }
                    plugin->progress.report("Training SVM for class " + className, passes*100/maxPasses + (i+1)*100/maxPasses/points.size(), NORMAL);
                    numChanged += examineExample (i);
                }
        }
        if (examineAll == 1)
            examineAll = 0;
        else if (numChanged == 0)
            examineAll = 1;
        /*       
        double s = 0.0;
        for (unsigned int i=0; i<points.size(); i++)
        s += alpha[i];
        double t = 0.;
        for (unsigned int i=0; i<points.size(); i++)
        for (unsigned int j=0; j<points.size(); j++)
        t += alpha[i]*alpha[j]*target[i]*target[j]*kernel(points[i],points[j]);
        double objFunc = (s - t/2.0); 
        plugin->progress.report(QString("The value of objective function should increase with each iteration.\n The value of objective function = %1").arg(objFunc).toStdString(), (passes*100)/maxPasses, NORMAL);
        */
        passes++;
    }
    plugin->progress.report("Finished training SVM for class " + className, 100, NORMAL, true);

    // Get the model for this class: keep only the support vectors (alpha > 0)
    vector<double> m_alpha;
    vector<int> m_target;
    int numberOfsupportVectors = 0;
    vector<point> supportVectors;
    int attributes = points[0].size();
    for (unsigned int i = 0; i < alpha.size(); i++)
    {
        if (alpha[i] > 0)
        {
            numberOfsupportVectors++;
            m_alpha.push_back(alpha[i]);
            supportVectors.push_back(points[i]);
            m_target.push_back(target[i]);
        }
    }
    svmModel model = svmModel(className, kernelType, threshold, attributes, w, sigma, numberOfsupportVectors, m_alpha, supportVectors, m_target, mu, stdv);

    // Compute the error rates.
    plugin->progress.report("Computing Error Rates using the model for class " + className, 0, NORMAL, true);
    double trainErrorRate = 0;
    double testErrorRate = 0;
    double crossValidationErrorRate = 0;

    for (unsigned int i = 0; i < points.size(); i++)
    {
        plugin->progress.report("Computing Error Rates using the model for class " + className, (i+1)*60.0/points.size(), NORMAL, true);

        if ((model.predict(points[i]) > 0) != (target[i] > 0))
            trainErrorRate++;
    }
    trainErrorRate = trainErrorRate*100/points.size();

    for (unsigned int i = 0; i < testSet.size(); i++)
    {
        plugin->progress.report("Computing Error Rates using the model for class " + className, 60 + (i+1)*20.0/testSet.size(), NORMAL, true);
        if ((model.predict(testSet[i]) > 0) != (yTest[i] > 0))
            testErrorRate++;
    }
    testErrorRate = testErrorRate*100/testSet.size();

    for (unsigned int i = 0; i < crossValidationSet.size(); i++)
    {
        plugin->progress.report("Computing Error Rates using the model for class " + className, 80 + (i+1)*20.0/crossValidationSet.size(), NORMAL, true);
        if ((model.predict(crossValidationSet[i]) > 0) != (yCV[i] > 0))
            crossValidationErrorRate++;
    }
    crossValidationErrorRate = crossValidationErrorRate*100/crossValidationSet.size();

    if (testSet.size() && crossValidationSet.size())
        plugin->progress.report(QString("%1\nTrain error = %2\nCrossValidation error = %3\nTest error = %4\n").arg(className.c_str()).arg(trainErrorRate).arg(crossValidationErrorRate).arg(testErrorRate).toStdString(), 100, WARNING, true);
    else
        plugin->progress.report(QString("%1\nTrain error = %2").arg(className.c_str()).arg(trainErrorRate).toStdString(), 100, WARNING, true);

    return model;
}