Code Example #1
//-- Training
//------------------------------------------------------------------
void NNTrainer::trainNetwork()
{
    //-- Training parameters:
    //---------------------------------------------------------------
    //-- Randomize weights
    std::cout << "Randomize weights...";
    randomWeights();
    std::cout << "\033[0;32m" << "[ok]" << "\033[0m" << std::endl
              << "Starting Gradient Descent... " << std::endl << std::endl;

    //-- Calculate how often to print a progress update:
    int showPeriod = iter > 10 ? iter / 10 : iter;

    //-- Start gradient descent:
    for (int i = 0; i < iter; i++)
    {
        //-- Calculate the gradient:
        std::vector<double> grad = gradient( global_lambda );

        //-- Convert the unrolled gradient into weight-shaped matrices:
        std::vector<Matrix *> matGrad = unrolledToMatrices( grad );

        //-- Gradient descent update: W(l) := W(l) - alpha * grad(l)
        for (int l = 0; l < (int) nn->getWeights().size(); l++)
            *nn->getWeights().at(l) = ( *nn->getWeights().at(l) ) - ( *matGrad.at(l) * alpha );

        //-- Deallocate the gradient matrices:
        for (int l = (int) matGrad.size() - 1; l >= 0; l--)
            delete matGrad.at(l);

        //-- Periodically, show percentage completed, accuracy and cost:
        if ( i % showPeriod == 0 )
        {
            std::cout << "\033[1A" << "\033[K";
            std::cout << "Completed: " << "\033[0;31m" << ( i / (double) iter ) * 100 << "%" << "\033[0m"
                      << " Current accuracy: " << "\033[0;31m" << accuracy() * 100 << "%" << "\033[0m"
                      << " Current cost: " << "\033[0;31m" << costFunction( global_lambda ) << "\033[0m" << std::endl;
        }

    }

    std::cout << "Done. Final accuracy: " << accuracy() * 100 << "%" << std::endl
	      << "Press enter to continue." << std::endl;

    std::cin.ignore();


}
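
Every snippet on this page calls some variant of randomWeights(). For reference, here is a minimal sketch of what the NNTrainer member might look like: the symmetric ±epsilon initialization mirrors the random-input pattern used in Code Example #4, but the Matrix accessors rows(), cols() and set(row, col, value) are assumptions, since the real Matrix interface is not shown in these examples.

void NNTrainer::randomWeights()
{
    //-- Sketch only: epsilon-interval weight initialization to break symmetry.
    //-- rows()/cols()/set() are hypothetical accessors, not the project's API.
    const double epsilon = 0.12; //-- Assumed range: weights in [-0.12, 0.12]

    for (int l = 0; l < (int) nn->getWeights().size(); l++)
    {
        Matrix *w = nn->getWeights().at(l);

        for (int i = 0; i < w->rows(); i++)
            for (int j = 0; j < w->cols(); j++)
                w->set( i, j, 2 * epsilon * ( (rand() / (double) RAND_MAX) - 0.5 ) );
    }
}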
Code Example #2
File: NN_File.cpp  Project: vHanda/Survival
//
// Training the Neural Network
//
void NN_File::train(const std::string & trainingSet)
{
    TiXmlDocument doc(trainingSet);
    doc.LoadFile();

    TiXmlElement * pRoot = doc.RootElement();
    loadNetwork(pRoot->FirstChildElement("neuralnetwork"));

    randomWeights();

    // The "error" attribute holds the target error as a percentage:
    double minError;
    pRoot->Attribute("error", &minError);
    minError /= 100.0;

    // Train repeatedly until the error drops below the target:
    double error = 100.0;
    while( error > minError )
    {
        error = train(pRoot);
        std::cout << "Error : " << error * 100 << std::endl;
    }
}
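
The train() above trusts the XML blindly: LoadFile(), the root-element lookup, and the attribute read are all unchecked. A defensive variant might look like the sketch below; it uses only standard TinyXML calls, but the loadTrainingSet helper itself is hypothetical, not part of the vHanda/Survival project.

bool NN_File::loadTrainingSet(const std::string & trainingSet, double & minError)
{
    // Hypothetical helper: validate the file before training on it.
    TiXmlDocument doc(trainingSet);
    if ( !doc.LoadFile() )
    {
        std::cerr << "XML error: " << doc.ErrorDesc() << std::endl;
        return false;
    }

    TiXmlElement * pRoot = doc.RootElement();
    if ( !pRoot || !pRoot->FirstChildElement("neuralnetwork") )
        return false;

    // The "error" attribute holds the target error as a percentage:
    if ( !pRoot->Attribute("error", &minError) )
        return false;
    minError /= 100.0;

    return true;
}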
Code Example #3
int InitRandomWeights::calcWeights(pvdata_t * dataStart, int dataPatchIndex, int arborId) {
   return randomWeights(dataStart, weightParams, dataPatchIndex); // RNG depends on dataPatchIndex but not on arborId.
}
Code Example #4
bool NNTrainer::checkGradient( const double lambda)
{
    //-- Routine to check the gradient
    //------------------------------------------------------------------------------------
    //-- Store the previous configuration:
    NeuralNetwork* prevNN = nn;
    std::vector<Matrix *> prevWeights = nn->getWeights();
    std::vector<TrainingExample> *prevTraining = trainingSet;

    //-- New configuration:

    //-- New neural network (layers: 3 inputs, 5 hidden, 3 outputs):
    int array[] = {3, 5, 3};
    std::vector<int> newDim(array, array+sizeof(array)/sizeof(int));

    NeuralNetwork newNN( newDim );

    //-- New weight matrices (rows = next layer's units, columns = previous layer's units + bias):
    Matrix mat1( 5, 4), mat2( 3, 6);
    std::vector< Matrix *> newWeights;
    newWeights.push_back( &mat1);
    newWeights.push_back( &mat2);

    //-- Set new configuration:
    newNN.setWeights( newWeights);
    nn = &newNN;

    //-- New training set (random, of course)
    std::vector<TrainingExample> newTS;

    for (int i = 0; i < 5; i++)
    {
        //-- Construct a random input vector, components in [-0.14, 0.14]:
        std::vector<double> x;

        for (int j = 0; j < 3; j++)
            x.push_back( 2 * 0.14 * ( (rand() / (float) RAND_MAX) - 0.5 ) );

        //-- Construct the expected output (one-hot, wrapping around for i >= 3):
        std::vector<double> y;
        for (int j = 0; j < 3; j++)
        {
            if ( j == i )
                y.push_back(1);
            else if ( i > j && i - 3 == j )
                y.push_back(1);
            else
                y.push_back(0);
        }

        //-- Construct and append the training example:
        TrainingExample aux;
        aux.x = x;
        aux.y = y;
        newTS.push_back( aux );
    }
    //-- Set new training set
    trainingSet = &newTS;

    //-- Randomize weights:
    randomWeights();

    //-- Calculate both gradients:
    std::vector<double> backprop = gradient( lambda );
    std::vector<double> numerical = numericalGradient( lambda );

    //-- Print them (relies on an operator<< overload for std::vector<double>):
    std::cout << "Backprop gradient:" << std::endl;
    std::cout << backprop << std::endl;
    std::cout << "Numerical gradient:" << std::endl;
    std::cout << numerical << std::endl;

    //-- Calculate the relative deviation, ||num - bp|| / ||num + bp||:
    std::vector<double> sum, difference;

    for (int i = 0; i < (int) backprop.size(); i++)
    {
        sum.push_back( pow( numerical.at(i) + backprop.at(i), 2 ) );
        difference.push_back( pow( numerical.at(i) - backprop.at(i), 2 ) );
    }

    double modSum = 0, modDif = 0;
    for (int i = 0; i < (int) backprop.size(); i++)
    {
        modSum += sum.at(i);
        modDif += difference.at(i);
    }

    std::cout << "Relative difference is: " << sqrt(modDif) / sqrt(modSum) << std::endl;

    //--Restore the previous configuration:
    nn = prevNN;
    nn->setWeights( prevWeights );
    trainingSet = prevTraining;

    //-- Pass the test if the relative difference is below 1e-4:
    return sqrt(modDif) / sqrt(modSum) < 1e-4;
}
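
checkGradient() validates gradient() against numericalGradient(), which is not shown on this page. A minimal sketch of the standard central-difference version follows; costFunction() appears in Code Example #1, but the mutable Matrix::at(row, col) accessor and the rows()/cols() getters are assumptions about the Matrix class.

std::vector<double> NNTrainer::numericalGradient( const double lambda )
{
    //-- Sketch only: central finite differences over every weight.
    //-- NOTE: the unroll order must match gradient()'s; row-major is assumed.
    const double epsilon = 1e-4;
    std::vector<double> grad;

    for (int l = 0; l < (int) nn->getWeights().size(); l++)
        for (int i = 0; i < nn->getWeights().at(l)->rows(); i++)
            for (int j = 0; j < nn->getWeights().at(l)->cols(); j++)
            {
                double &theta = nn->getWeights().at(l)->at(i, j);
                const double saved = theta;

                theta = saved + epsilon;
                double costPlus = costFunction( lambda );

                theta = saved - epsilon;
                double costMinus = costFunction( lambda );

                theta = saved; //-- Restore the original weight

                //-- dJ/dtheta ~ ( J(theta+eps) - J(theta-eps) ) / (2*eps)
                grad.push_back( (costPlus - costMinus) / (2 * epsilon) );
            }

    return grad;
}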