Code example #1
File: train.cpp  Project: gyu-don/mnist_nn
bool Trainee::dump(const char *traindatapath)
{
    std::ofstream dat(traindatapath);

    if(!dat.good()) return false;
    // Header line: layer sizes (input, hidden1, hidden2, output).
    dat << n_inputvec << ' ' << n_hid1vec << ' ' << n_hid2vec << ' ' << n_outputvec << '\n';

    int i, j;
    // Each weight matrix is written row by row, space-separated; the inner
    // loop leaves j at the last column, which is then written with a newline.
    for(i=0;i<n_hid1vec;i++){
        for(j=0;j<n_inputvec-1;j++) dat << weight1(i, j) << ' ';
        dat << weight1(i, j) << '\n';
    }
    for(j=0;j<n_hid1vec-1;j++) dat << bias1(j) << ' ';
    dat << bias1(j) << '\n';

    for(i=0;i<n_hid2vec;i++){
        for(j=0;j<n_hid1vec-1;j++) dat << weight2(i, j) << ' ';
        dat << weight2(i, j) << '\n';
    }
    for(j=0;j<n_hid2vec-1;j++) dat << bias2(j) << ' ';
    dat << bias2(j) << '\n';

    for(i=0;i<n_outputvec;i++){
        for(j=0;j<n_hid2vec-1;j++) dat << weight3(i, j) << ' ';
        dat << weight3(i, j) << '\n';
    }
    for(j=0;j<n_outputvec-1;j++) dat << bias3(j) << ' ';
    dat << bias3(j) << '\n';

    return dat.good();
}
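The format written by dump() is plain whitespace-separated text: one header line with the four layer sizes, then each weight matrix row by row, each followed by its bias vector. Below is a minimal sketch (not part of the original project) of how that file could be read back; it parses into local Eigen objects and leaves wiring them into a Trainee out of scope.

// Minimal sketch of a reader for the format produced by dump() above
// (not part of the original project). Parses into local Eigen objects.
#include <fstream>
#include <Eigen/Dense>

bool load_dump(const char *path)
{
    std::ifstream dat(path);
    if(!dat.good()) return false;

    // Header line: layer sizes.
    int n_in, n_h1, n_h2, n_out;
    dat >> n_in >> n_h1 >> n_h2 >> n_out;
    if(!dat.good()) return false;

    Eigen::MatrixXf w1(n_h1, n_in), w2(n_h2, n_h1), w3(n_out, n_h2);
    Eigen::VectorXf b1(n_h1), b2(n_h2), b3(n_out);

    // The values are whitespace-separated, so operator>> is sufficient.
    for(int i=0;i<n_h1;i++) for(int j=0;j<n_in;j++) dat >> w1(i, j);
    for(int j=0;j<n_h1;j++) dat >> b1(j);
    for(int i=0;i<n_h2;i++) for(int j=0;j<n_h1;j++) dat >> w2(i, j);
    for(int j=0;j<n_h2;j++) dat >> b2(j);
    for(int i=0;i<n_out;i++) for(int j=0;j<n_h2;j++) dat >> w3(i, j);
    for(int j=0;j<n_out;j++) dat >> b3(j);

    return dat.good();
}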
Code example #2
File: train.cpp  Project: gyu-don/mnist_nn
Trainee::Trainee(int n_hid1, int n_hid2, float init_sigma)
{
    n_inputvec = MNISTreader::pixelSize;
    n_hid1vec = n_hid1;
    n_hid2vec = n_hid2;
    n_outputvec = 10;

    // Per-parameter accumulators of squared gradients (used by AdaGrad in train()).
    gsq_w1 = Eigen::ArrayXXf::Zero(n_hid1vec, n_inputvec);
    gsq_b1 = Eigen::ArrayXf::Zero(n_hid1vec);
    gsq_w2 = Eigen::ArrayXXf::Zero(n_hid2vec, n_hid1vec);
    gsq_b2 = Eigen::ArrayXf::Zero(n_hid2vec);
    gsq_w3 = Eigen::ArrayXXf::Zero(n_outputvec, n_hid2vec);
    gsq_b3 = Eigen::ArrayXf::Zero(n_outputvec);

    // Gaussian initialization for the weights; biases start at zero.
    std::random_device rd;
    std::mt19937 mt(rd());
    std::normal_distribution<float> nd(0.0f, init_sigma);

    weight1 = Eigen::MatrixXf(n_hid1vec, n_inputvec);
    for(int i=0;i<n_hid1vec;i++) for(int j=0;j<n_inputvec;j++) weight1(i, j) = nd(mt);
    bias1 = Eigen::VectorXf::Zero(n_hid1vec);

    weight2 = Eigen::MatrixXf(n_hid2vec, n_hid1vec);
    for(int i=0;i<n_hid2vec;i++) for(int j=0;j<n_hid1vec;j++) weight2(i, j) = nd(mt);
    bias2 = Eigen::VectorXf::Zero(n_hid2vec);

    weight3 = Eigen::MatrixXf(n_outputvec, n_hid2vec);
    for(int i=0;i<n_outputvec;i++) for(int j=0;j<n_hid2vec;j++) weight3(i, j) = nd(mt);
    bias3 = Eigen::VectorXf::Zero(n_outputvec);
}
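The constructor draws every weight from a normal distribution N(0, init_sigma) and zero-initializes the biases together with the gsq_* accumulators that train() uses for AdaGrad. A hypothetical driver follows; the header name, layer sizes, sigma, and file name are illustrative assumptions, not taken from the project.

// Hypothetical driver (header name, layer sizes, sigma, and file name are
// illustrative only; they are not taken from the project).
#include "train.h"

int main()
{
    // The input size is fixed to MNISTreader::pixelSize inside the constructor.
    Trainee trainee(100, 50, 0.05f);

    // ... call trainee.train(minibatch, learning_rate) over the MNIST data here ...

    trainee.dump("mnist_nn.dat");   // persist in the format shown in code example #1
    return 0;
}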
Code example #3
void AncillaryMethods::intersectIdx(Vector< FrameInlier >& idx1, Vector< FrameInlier >& idx2, Vector< FrameInlier >& intersection) {

    // Merge-style intersection of two inlier lists ordered by frame number:
    // for every frame present in both idx1 and idx2, keep the inliers common
    // to both and carry the corresponding weights from each list.
    int j = 0;
    int k = 0;
    intersection.clearContent();
    Vector<int> inter;
    Vector<int> inlier1;
    Vector<int> inlier2;
    Vector<double> weight1;
    Vector<double> weight2;

    if (idx1.getSize() > 0 && idx2.getSize() > 0) {
        for (int i = 0; i < idx1.getSize(); i++) {

            while (idx1(i).getFrame() > idx2(j).getFrame() && j < idx2.getSize() - 1) {
                j++;
            }

            if (j == idx2.getSize())
                break;

            if (idx1(i).getFrame() == idx2(j).getFrame()) {
                inlier1 = idx1(i).getInlier();
                inlier2 = idx2(j).getInlier();

                weight1 = idx1(i).getWeight();
                weight2 = idx2(j).getWeight();

                inlier1.intersection(inlier2, inter);

                if (inter.getSize() > 0) {
                    FrameInlier inlier(idx1(i).getFrame());

                    for ( int l = 0; l < inlier1.getSize(); l++) {
                        while (inlier1(l) > inlier2(k) && k < inlier2.getSize() - 1) {
                            k++;
                        }

                        if (k == inlier2.getSize())
                            break;

                        if (inlier1(l) == inlier2(k)) {
                            inlier.addInlier(inlier1(l));
                            inlier.addWeight(weight1(l));
                            inlier.addWeight(weight2(k));
                        }
                    }
                    k = 0;
                    intersection.pushBack(inlier);
                }
            }
        }
    }
}
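The function above is a two-pointer, merge-style scan, which relies on both lists being ordered by frame number and, within a frame, by inlier id. The same idea on sorted std::vector<int>, as a standalone illustration independent of the project's Vector and FrameInlier types:

// Standalone illustration of the merge-style intersection used above,
// on sorted std::vector<int> instead of the project's container types.
#include <vector>
#include <cstddef>

std::vector<int> intersect_sorted(const std::vector<int>& a, const std::vector<int>& b)
{
    std::vector<int> out;
    std::size_t i = 0, j = 0;
    while (i < a.size() && j < b.size()) {
        if (a[i] < b[j])      ++i;                        // advance the smaller side
        else if (b[j] < a[i]) ++j;
        else { out.push_back(a[i]); ++i; ++j; }           // common element
    }
    return out;
}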
Code example #4
File: xStats.cpp  Project: ianmartin/GPSTk
/*
****Test to assess the quality of the Add member of the Stats
****class, which is designed to add a gpstk::Vector or an individual
****sample to an instance of Stats.

**** This test also indirectly exercises Minimum, Maximum,
**** Average, Variance, and StdDev, which are all members of the
**** Stats class.
*/
void xStats :: addTest (void)
{
	gpstk::Stats<double> addTest;
	CPPUNIT_ASSERT_NO_THROW(addTest.Add(10.,1));
	
	addTest.Add(20.,1);
	addTest.Add(30.,1);
	//Create a vector with one element whose value is 40.0
	gpstk::Vector<double> one(1,40.);
	//Create a weight vector with one element whose value is 1
	gpstk::Vector<double> weight(1,1);
	
	
	CPPUNIT_ASSERT_NO_THROW(addTest.Add(one,weight));
	
	CPPUNIT_ASSERT_EQUAL((unsigned) 4, addTest.N());
	CPPUNIT_ASSERT_EQUAL(10., addTest.Minimum());
	CPPUNIT_ASSERT_EQUAL(40., addTest.Maximum());
	CPPUNIT_ASSERT_DOUBLES_EQUAL(25., addTest.Average(),1E-6);
	CPPUNIT_ASSERT_DOUBLES_EQUAL(166.66667, addTest.Variance(),1E-3);
	CPPUNIT_ASSERT_DOUBLES_EQUAL(12.9099445, addTest.StdDev(),1E-6);
	CPPUNIT_ASSERT_EQUAL(4., addTest.Normalization());
	CPPUNIT_ASSERT_EQUAL(true, addTest.Weighted());
	
	gpstk::Stats<double>  addTest0;
	gpstk::Vector<double> weight0(0,1);
	addTest0.Add(10.,0);
	addTest0.Add(20.,0);
	addTest0.Add(30.,0);
	addTest0.Add(one,weight0);
	
	CPPUNIT_ASSERT_EQUAL((unsigned) 4, addTest0.N());
	CPPUNIT_ASSERT_EQUAL(10., addTest0.Minimum());
	CPPUNIT_ASSERT_EQUAL(40., addTest0.Maximum());
	CPPUNIT_ASSERT_DOUBLES_EQUAL(25., addTest0.Average(),1E-6);
	CPPUNIT_ASSERT_DOUBLES_EQUAL(166.66667, addTest0.Variance(),1E-3);
	CPPUNIT_ASSERT_DOUBLES_EQUAL(12.9099445, addTest0.StdDev(),1E-6);
	CPPUNIT_ASSERT_EQUAL(0., addTest0.Normalization());
	CPPUNIT_ASSERT_EQUAL(false, addTest0.Weighted());
	
	
	gpstk::Vector<double> two(4,20.);
	gpstk::Vector<double> weight2(1,0);
	try
	{
		CPPUNIT_ASSERT_THROW(addTest.Add(two,weight2),gpstk::Exception);
	}
	catch(gpstk::Exception& e)
	{
	}
	
}
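The expected values in the assertions follow directly from the four samples 10, 20, 30, 40: mean 25, sample variance 500/3 ≈ 166.667, and standard deviation ≈ 12.9099, matching the (n-1)-normalized statistics the test checks. A standalone check of that arithmetic, independent of GPSTk:

// Standalone check of the values asserted above for the samples 10, 20, 30, 40.
#include <cmath>
#include <cstdio>

int main()
{
    const double x[4] = {10., 20., 30., 40.};
    double mean = 0.;
    for (double v : x) mean += v;
    mean /= 4.;                                        // 25
    double var = 0.;
    for (double v : x) var += (v - mean) * (v - mean);
    var /= 3.;                                         // sample (n-1) variance: 500/3 ~= 166.667
    std::printf("%g %g %g\n", mean, var, std::sqrt(var));  // 25 166.667 12.9099
    return 0;
}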
Code example #5
File: train.cpp  Project: gyu-don/mnist_nn
void Trainee::train(std::vector<std::pair<InputType, AnswerType>> minibatch, float learning_rate)
{
    Eigen::MatrixXf dweight3 = Eigen::MatrixXf::Zero(n_outputvec, n_hid2vec);
    Eigen::VectorXf dbias3 = Eigen::VectorXf::Zero(n_outputvec);
    Eigen::MatrixXf dweight2 = Eigen::MatrixXf::Zero(n_hid2vec, n_hid1vec);
    Eigen::VectorXf dbias2 = Eigen::VectorXf::Zero(n_hid2vec);
    Eigen::MatrixXf dweight1 = Eigen::MatrixXf::Zero(n_hid1vec, n_inputvec);
    Eigen::VectorXf dbias1 = Eigen::VectorXf::Zero(n_hid1vec);

    /* For AdaGrad: scale each accumulated gradient by 1/sqrt(sum of squared
       gradients); the zero check avoids 0/0 for parameters whose accumulator
       is still zero. */
    auto fn = [](float lhs, float rhs) -> float { return lhs != 0.0f ? lhs / rhs : 0.0f; };

    for(auto sample: minibatch){
        Eigen::VectorXf inputvec = input2vec(sample.first);
        Eigen::VectorXf z1 = feedforward(inputvec, 1);
        Eigen::VectorXf z2 = feedforward(inputvec, 2);  // Admittedly a retrofit, but this is obviously wasteful; z2 should be computed from z1 instead of re-running the full forward pass.

        // Calculate delta of output layer.
        Eigen::VectorXf delta3;
        delta3 = feedforward(inputvec, 3);
        delta3(sample.second) -= 1.0f;
        {
            Eigen::ArrayXXf e = delta3 * z2.transpose();
            gsq_w3 += e * e;
            gsq_b3 += delta3.array() * delta3.array();
            dweight3 += e.matrix();
            dbias3 += delta3;
        }

        // Calculate delta of 2nd hidden layer.
        Eigen::VectorXf delta2 = Eigen::VectorXf::Zero(n_hid2vec);
        for(int j=0;j<n_hid2vec;j++){
            for(int k=0;k<n_outputvec;k++) delta2(j) += delta3(k) * weight3(k, j) * (z2(j) >= 0.f ? 1.f : 0.f);
        }
        {
            Eigen::ArrayXXf e = delta2 * z1.transpose();
            gsq_w2 += e * e;
            gsq_b2 += delta2.array() * delta2.array();
            dweight2 += e.matrix();
            dbias2 += delta2;
        }

        // Calculate delta of 1st hidden layer.
        Eigen::VectorXf delta1 = Eigen::VectorXf::Zero(n_hid1vec);
        for(int j=0;j<n_hid1vec;j++){
            for(int k=0;k<n_hid2vec;k++) delta1(j) += delta2(k) * weight2(k, j) * (z1(j) >= 0.f ? 1.f : 0.f);
        }
        {
            Eigen::ArrayXXf e = delta1 * inputvec.transpose();
            gsq_w1 += e * e;
            gsq_b1 += delta1.array() * delta1.array();
            dweight1 += e.matrix();
            dbias1 += delta1;
        }
    }
    weight1 -= dweight1.binaryExpr(gsq_w1.sqrt().matrix(), fn) * learning_rate / minibatch.size();
    bias1 -= dbias1.binaryExpr(gsq_b1.sqrt().matrix(), fn) * learning_rate / minibatch.size();
    weight2 -= dweight2.binaryExpr(gsq_w2.sqrt().matrix(), fn) * learning_rate / minibatch.size();
    bias2 -= dbias2.binaryExpr(gsq_b2.sqrt().matrix(), fn) * learning_rate / minibatch.size();
    weight3 -= dweight3.binaryExpr(gsq_w3.sqrt().matrix(), fn) * learning_rate / minibatch.size();
    bias3 -= dbias3.binaryExpr(gsq_b3.sqrt().matrix(), fn) * learning_rate / minibatch.size();
}
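The tail of train() is a per-parameter AdaGrad step: each gradient summed over the minibatch is divided by the square root of the running sum of squared per-sample gradients (the gsq_* members), averaged over the batch, and scaled by learning_rate. A per-element sketch of that update; the helper name and signature are illustrative, not from the project.

// Per-element view of the update at the end of train(). g is the gradient
// summed over the minibatch, G the running sum of squared per-sample
// gradients (gsq_*); the zero check mirrors the fn lambda above.
#include <cmath>
#include <cstddef>

float adagrad_step(float w, float g, float G, float learning_rate, std::size_t batch_size)
{
    if (g == 0.0f) return w;   // parameter received no gradient: no update
    return w - learning_rate * g / std::sqrt(G) / static_cast<float>(batch_size);
}

Note that the usual AdaGrad epsilon term in the denominator is replaced here by the explicit zero-gradient check.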