Example No. 1
        /** Function for computing the potentials of the nodes and the edges in
         * the graph.
         */
        void computePotentials()
        {
            // Method steps:
            //  1. Compute node potentials
            //  2. Compute edge potentials

            //
            //  1. Node potentials
            //

            std::vector<CNodePtr>::iterator it;

            for ( it = m_nodes.begin(); it != m_nodes.end(); it++ )
            {
                CNodePtr nodePtr = *it;

                if ( !nodePtr->finalPotentials() )
                {
                    // Compute the node potentials according to the node type and its
                    // extracted features

                    Eigen::VectorXd potentials = nodePtr->getType()->computePotentials( nodePtr->getFeatures() );

                    // Apply the node class multipliers
                    potentials = potentials.cwiseProduct( nodePtr->getClassMultipliers() );

                    nodePtr->setPotentials( potentials );
                }

            }

            //
            //  2. Edge potentials
            //

            std::vector<CEdgePtr>::iterator it2;

            for ( it2 = m_edges.begin(); it2 != m_edges.end(); it2++ )
            {
                CEdgePtr edgePtr = *it2;

                Eigen::MatrixXd potentials
                        = edgePtr->getType()->computePotentials( edgePtr->getFeatures() );

                edgePtr->setPotentials( potentials );
            }

        }
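
The pattern to take away here is the element-wise scaling of a potential vector with cwiseProduct. Below is a minimal standalone sketch of the same operation; the potentials and multipliers are made-up stand-ins for what nodePtr->getType()->computePotentials() and nodePtr->getClassMultipliers() would return:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Hypothetical raw potentials for a 3-class node.
    Eigen::VectorXd potentials(3);
    potentials << 0.2, 0.5, 0.3;

    // Hypothetical per-class multipliers.
    Eigen::VectorXd multipliers(3);
    multipliers << 1.0, 2.0, 0.5;

    // Element-wise scaling, as in computePotentials() above.
    potentials = potentials.cwiseProduct(multipliers);
    std::cout << potentials.transpose() << std::endl;   // 0.2 1 0.15
}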
Example No. 2
double compareT(const Eigen::Isometry3d &a, const Eigen::Isometry3d &b,
		const Eigen::VectorXd &weight)
{
	// Stack translation (3 components) and quaternion (4 components)
	// into a 7-vector for each transform.
	Eigen::Quaterniond qa(a.rotation());
	Eigen::Quaterniond qb(b.rotation());
	Eigen::Vector3d pa = a.translation();
	Eigen::Vector3d pb = b.translation();
	Eigen::VectorXd va(7), vb(7), verr(7), vScaled(7);
	va << pa, qa.x(), qa.y(), qa.z(), qa.w();
	vb << pb, qb.x(), qb.y(), qb.z(), qb.w();

	// Weight each of the 7 error components element-wise, then take the
	// squared norm. Note that q and -q encode the same rotation, so this
	// compares coefficients rather than a proper rotation distance.
	verr = vb - va;
	vScaled = weight.cwiseProduct(verr);
	return vScaled.squaredNorm();
}
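
A short usage sketch (assumed calling code, not part of the original source); the weight vector lets the caller trade off translational against rotational error:

#include <Eigen/Geometry>
#include <iostream>

// compareT as defined above

int main()
{
    Eigen::Isometry3d a = Eigen::Isometry3d::Identity();
    Eigen::Isometry3d b = Eigen::Isometry3d::Identity();
    b.translate(Eigen::Vector3d(0.1, 0.0, 0.0));

    // Weight the 3 translation components with 1.0 and the
    // 4 quaternion components with 0.5 (made-up values).
    Eigen::VectorXd w(7);
    w << 1.0, 1.0, 1.0, 0.5, 0.5, 0.5, 0.5;

    std::cout << compareT(a, b, w) << std::endl;   // 0.01
}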
Example No. 3
// Zeroes, for each node listed in the mask, every potential whose label
// index does not appear in that node's mask vector.
void UPGMpp::applyMaskToPotentials(CGraph &graph, map<size_t,vector<size_t> > &mask )
{
    vector<CNodePtr> &nodes = graph.getNodes();

    for ( size_t node_index = 0; node_index < nodes.size(); node_index++ )
    {
        CNodePtr nodePtr    = nodes[node_index];
        size_t nodeID       = nodePtr->getID();

        if ( mask.count(nodeID) )
        {
            Eigen::VectorXd nodePot = nodePtr->getPotentials();

            // Indicator vector: 1 for the labels the mask keeps, 0 elsewhere.
            Eigen::VectorXd potMask( nodePot.rows() );
            potMask.fill(0);

            for ( size_t mask_index = 0; mask_index < mask[nodeID].size(); mask_index++ )
                potMask(mask[nodeID][mask_index]) = 1;

            // The element-wise product zeroes every potential not kept by the mask.
            nodePot = nodePot.cwiseProduct(potMask);

            nodePtr->setPotentials( nodePot );
        }
    }
}
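
A sketch of how such a mask could be built and applied, assuming a populated UPGMpp::CGraph named graph; the node ID and label indices are made up:

#include <map>
#include <vector>

void maskExample(UPGMpp::CGraph &graph)
{
    // Keep only labels 0 and 2 for the node with ID 7 (made-up values);
    // every other potential of that node is zeroed by the element-wise product.
    std::map<size_t, std::vector<size_t> > mask;
    mask[7].push_back(0);
    mask[7].push_back(2);

    UPGMpp::applyMaskToPotentials(graph, mask);
}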
Example No. 4
/**
* Multiplies the weighted vector element-wise with the bias vector.
*
* @return the coefficient-wise product of the two vectors
**/
Eigen::VectorXd Product(const Eigen::VectorXd &weighedVector, const Eigen::VectorXd &biasVector){
 return weighedVector.cwiseProduct(biasVector);
}
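
For example (made-up numbers):

#include <Eigen/Dense>
#include <iostream>

// Product as defined above

int main()
{
    Eigen::VectorXd w(3), b(3);
    w << 1.0, 2.0, 3.0;
    b << 0.5, 0.5, 2.0;

    // coefficient-wise product: 0.5 1 6
    std::cout << Product(w, b).transpose() << std::endl;
}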
Example No. 5
int main(int argc, char **argv) {
/*    NeuralNetwork nn(Eigen::Vector4d(3,3,3,1), 0.2, 0.1);

    typedef Eigen::Matrix<double,1,1> Vector1d;
    Vector1d rtrue;
    rtrue << 1;
    Vector1d rfalse;
    rfalse << 0;

    int NUM_ITERS = 100000;


    std::cout << "Beginning learning phase...\niteration 0";
    
    
    
    std::vector<std::pair<Eigen::VectorXd, Eigen::VectorXd>> training_data;
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(1,1,1), rtrue));
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(1,0,0), rtrue));
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(0,1,0), rtrue));
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(0,0,1), rtrue));
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(1,1,0), rfalse));
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(0,1,1), rfalse));
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(1,0,1), rfalse));
    training_data.push_back(std::make_pair<Eigen::VectorXd, Eigen::VectorXd>(Eigen::Vector3d(0,0,0), rfalse));
    
    nn.TrainBatch(training_data, 100000, 0.000001);
    
    std::cout <<"Learning done."<< std::endl;
    std::cout <<"Beginning testing phase...\n";

    std::cout << nn.Evaluate(Eigen::Vector3d(1,1,1)) << " vs " << 1 << std::endl;
    std::cout << nn.Evaluate(Eigen::Vector3d(1,1,0)) << " vs " << 0 << std::endl;
    std::cout << nn.Evaluate(Eigen::Vector3d(1,0,0)) << " vs " << 1 << std::endl;
    std::cout << nn.Evaluate(Eigen::Vector3d(1,0,1)) << " vs " << 0 << std::endl;
    std::cout << nn.Evaluate(Eigen::Vector3d(0,1,1)) << " vs " << 0 << std::endl;
    std::cout << nn.Evaluate(Eigen::Vector3d(0,1,0)) << " vs " << 1 << std::endl;
    std::cout << nn.Evaluate(Eigen::Vector3d(0,0,1)) << " vs " << 1 << std::endl;
    std::cout << nn.Evaluate(Eigen::Vector3d(0,0,0)) << " vs " << 0 << std::endl;
*/

    Reader reader;
    Eigen::VectorXd layers(4);
    layers << 10, 10, 5, 1;
    NeuralNetwork nn(layers, 0.2, 0.1);
    
    std::cout << "Reading training data..." << std::flush;
    std::vector<std::pair<Eigen::VectorXd, Eigen::VectorXd>> training_data;
    std::cout << reader.ReadTrainingData("training_sample.csv",10,1,training_data) << " lines read." << std::endl;
    
    std::cout << "Beginning learning phase..." << std::endl;
    nn.TrainBatch(training_data, 50000, 0.001);
    std::cout << "End training phase..." << std::endl;
    nn.PrintWeights();
    std::cout << "Reading evaluation data..." << std::flush;
    std::vector<Eigen::VectorXd> evaluation_input_data;
    std::vector<Eigen::VectorXd> evaluation_output_data;
    
    int evaluation_rowcount = 0;
    std::cout << (evaluation_rowcount = reader.ReadEvaluationData("training_normalized.csv",10,1,evaluation_input_data, evaluation_output_data)) << " lines read." << std::endl;
    std::cout << "Beginning testing phase..." << std::endl;
    double cumulative_mse = 0;
    int success_count = 0;
    double success_square_error_threshold = 0.16;
    for(int i = 0; i < evaluation_rowcount; ++i) {
      Eigen::VectorXd output = nn.Evaluate(evaluation_input_data[i]);
      Eigen::VectorXd error = evaluation_output_data[i] - output;
      // Per-row mean square error: element-wise squared error, averaged.
      double square_error = error.cwiseProduct(error).sum() / error.size();
      cumulative_mse += square_error;
      if(square_error < success_square_error_threshold) {
        ++success_count;
      }
    }
    
    nn.PrintWeights();
    nn.PrintStates();
    
    
    std::cout << "End testing phase. Mean square error " << cumulative_mse/evaluation_rowcount << "\n";
    std::cout << success_count << " / " << evaluation_rowcount << " rows classified correctly (mse < " << success_square_error_threshold << ")\n";

    return 0;
}
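
The per-row error above is a mean square error computed with cwiseProduct. A minimal standalone sketch of that pattern, including the equivalent squaredNorm formulation:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::VectorXd target(3), output(3);
    target << 1.0, 0.0, 1.0;
    output << 0.9, 0.2, 0.7;

    Eigen::VectorXd error = target - output;

    // mean square error, written as in the evaluation loop above
    double mse = error.cwiseProduct(error).sum() / error.size();

    // the same value, written with squaredNorm
    double mse2 = error.squaredNorm() / error.size();

    std::cout << mse << " == " << mse2 << std::endl;   // 0.0466... == 0.0466...
}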
Example No. 6
/// <summary>
/// Uses a precomputed kernel matrix to solve the SVM problem.
/// </summary>
/// <param name="Kernel">precomputed kernel matrix</param>
DualSolution libSVMWrapper::Solve(const Eigen::MatrixXd &Kernel)
{
	// unfortunately libsvm needs an array of "svm_node"s, so the kernel
	// matrix has to be copied entry by entry (eigen3 gives no direct way
	// to hand over its storage in the layout libsvm wants)
	const int sc = NumberOfData + 1;

	// TODO: access the matrix storage directly instead, e.g.
	// libSVM_x_space[j+k].value = *(Kernel.data() + (k-1)*NumberOfData + i);
	#pragma omp parallel for
	for (int i = 0; i < libSVM_Problem.l; i++)
	{
		// each row occupies sc+1 svm_nodes: sc entries plus a terminator;
		// j is computed per iteration so the parallel loop stays race-free
		int j = (sc+1)*i;
		for (int k = 0; k < sc; k++)
		{
			libSVM_x_space[j+k].index = k;
			if (k == 0)
			{
				// libsvm's precomputed-kernel format: "0:sample_serial_number"
				libSVM_x_space[j].value = i + 1;
			}
			else
			{
				libSVM_x_space[j+k].value = Kernel(i, k - 1);
			}
		}
		// terminate the row
		libSVM_x_space[j+sc].index = -1;
	}


	#ifdef DEBUG
	for (int i = 0; i < libSVM_Problem.l; i++)
	{
		if ((int) libSVM_Problem.x[i][0].value <= 0 || (int) libSVM_Problem.x[i][0].value > sc)
		{
			printf("Wrong input format: sample_serial_number out of range\n");
			exit(0);
		}
	}


	const char *error_msg;
	error_msg = svm_check_parameter(&libSVM_Problem, &libSVM_Parameter);
	if (error_msg)
	{
		fprintf(stderr, "ERROR: %s\n", error_msg);
		exit(1);
	}
	#endif

	// train the model
	if (libSVM_Model != NULL)
	{
		svm_free_model_content(libSVM_Model);
		libSVM_Model = NULL;
	}
	libSVM_Model = svm_train(&libSVM_Problem, &libSVM_Parameter);
	// extract results

	// the bias is the negated rho of the (single) decision function
	double Bias = -libSVM_Model->rho[0];

	// scatter the support-vector coefficients back into a dense alpha vector;
	// sv_coef stores y_i * alpha_i, so strip the sign to recover alpha_i
	Eigen::VectorXd Alpha = Eigen::VectorXd::Zero(NumberOfData);
	for (int i = 0; i < libSVM_Model->l; i++)
	{
		Alpha(libSVM_Model->sv_indices[i] - 1) = (libSVM_Model->sv_coef[0][i] < 0) ? -libSVM_Model->sv_coef[0][i] : libSVM_Model->sv_coef[0][i];
	}

	DualSolution DS;
	DS.Bias = Bias;
	DS.Alpha = Alpha;

	// objective value of the dual solution:
	// sum(alpha) - 0.5 * (alpha .* y)^T * Kernel * (alpha .* y)
	Eigen::VectorXd tt = Alpha.cwiseProduct(Y);
	DS.Value = Alpha.sum() - 0.5 * (double)(tt.transpose() * (Kernel*tt));

	return DS;
}
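
The closing lines compute the SVM dual objective W(alpha) = sum_i alpha_i - 1/2 (alpha .* y)^T K (alpha .* y), with cwiseProduct supplying the element-wise alpha_i * y_i. A standalone sketch of just that computation, with made-up numbers:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // two training points with opposite labels (made-up values)
    Eigen::VectorXd Alpha(2), Y(2);
    Alpha << 0.5, 0.5;
    Y << 1.0, -1.0;

    Eigen::MatrixXd Kernel(2, 2);
    Kernel << 1.0, 0.2,
              0.2, 1.0;

    // element-wise alpha_i * y_i, then the quadratic form
    Eigen::VectorXd tt = Alpha.cwiseProduct(Y);
    double value = Alpha.sum() - 0.5 * tt.dot(Kernel * tt);
    std::cout << value << std::endl;   // 0.8
}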