Example #1
void CGppe::Predict_CGppe_Laplace(double sigma, MatrixXd t, MatrixXd x, VectorXd idx_global, VectorXd ind_t, VectorXd ind_x,
                                  MatrixXd tstar, MatrixXd test_pair)
{
    int Kt_ss = 1; // test-task self-covariance, fixed to 1 here
    double sigma_star, val;
    MatrixXd Kx_star, Kx_star_star, kstar, Kss, Css;
    MatrixXd Kt_star = covfunc_t->Compute(t, tstar);

    Kx_star = GetMatRow(Kx, test_pair.transpose()).transpose();                // training-to-test input covariance
    Kx_star_star = GetMat(Kx, test_pair.transpose(), test_pair.transpose());   // test-to-test input covariance
    kstar = Kron(Kt_star, Kx_star);
    kstar = GetMatRow(kstar, idx_global);

    Kss = Kt_ss * Kx_star_star;

    // Laplace-approximated predictive mean and covariance of the test utilities
    mustar = kstar.transpose() * Kinv * GetVec(f, idx_global);
    Css    = Kss - kstar.transpose() * W * llt.solve(Kinv * kstar);

    // Probability that the first item of the test pair is preferred
    sigma_star = sqrt(Css(0, 0) + Css(1, 1) - 2 * Css(0, 1) + pow(sigma, 2));
    val = (mustar(0) - mustar(1)) / sigma_star;
    p   = normcdf(val);
}
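The last line converts the difference of the two predicted utilities into a preference probability through the standard normal CDF. The `normcdf` helper is not part of this listing; a minimal scalar stand-in (hypothetical name, built on the identity Phi(x) = 0.5 * erfc(-x / sqrt(2))) could look like:

#include <cmath>

// Hypothetical scalar stand-in for the normcdf used above:
// the standard normal CDF, Phi(x) = 0.5 * erfc(-x / sqrt(2)).
double normcdf_scalar(double x)
{
    return 0.5 * std::erfc(-x / std::sqrt(2.0));
}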
Example #2
#include <iostream>
#include <Eigen/Dense>
#include <unsupported/Eigen/KroneckerProduct>

typedef Eigen::MatrixXd Matrix_t; // assumed; the original listing does not define Matrix_t

void testKroneckorTensorProduct()
{
  Matrix_t A(2, 2);
  A << 1, 2, 3, 4;
  std::cout << A << std::endl;

  Matrix_t Ones = Matrix_t::Ones(2, 2);
  std::cout << Ones << std::endl;

  // Size the result as (A.rows * Ones.rows) x (A.cols * Ones.cols)
  // and evaluate the lazy Kronecker-product expression into it
  Matrix_t C(A.rows() * Ones.rows(), A.cols() * Ones.cols());
  Eigen::KroneckerProduct<Matrix_t, Matrix_t> Kron(A, Ones);
  Kron.evalTo(C);

  std::cout << C << std::endl;
}
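For comparison, Eigen's unsupported KroneckerProduct module also exposes the free function `Eigen::kroneckerProduct`, which builds the same lazy expression without naming its type explicitly. A minimal standalone equivalent of the test above, assuming `Matrix_t` is `Eigen::MatrixXd`:

#include <iostream>
#include <Eigen/Dense>
#include <unsupported/Eigen/KroneckerProduct>

int main()
{
  Eigen::MatrixXd A(2, 2);
  A << 1, 2, 3, 4;

  // Assignment evaluates the lazy Kronecker-product expression:
  // each entry a_ij of A becomes the 2x2 block a_ij * Ones.
  Eigen::MatrixXd C = Eigen::kroneckerProduct(A, Eigen::MatrixXd::Ones(2, 2));
  std::cout << C << std::endl;
  return 0;
}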
Example #3
void CGppe::Predictive_Utility_Distribution(MatrixXd t, MatrixXd tstar, int N, VectorXd idx_global)
{
    int Kt_ss = 1; // test-task self-covariance, fixed to 1 here
    VectorXd idx_xstar(N);
    MatrixXd Kstar, Kx_star_star, Kx_star, Kss, Css, Kt_star;
    for (int i = 0; i < N; i++)
    {
        idx_xstar(i) = i; // predict at the first N inputs
    }

    Kt_star = covfunc_t->Compute(t, tstar);
    Kx_star = GetMatRow(Kx, idx_xstar);               // training-to-test input covariance
    Kx_star_star = GetMat(Kx, idx_xstar, idx_xstar);  // test-to-test input covariance

    Kstar = Kron(Kt_star, Kx_star);
    Kstar = GetMatRow(Kstar, idx_global);

    Kss = Kt_ss * Kx_star_star;

    // Predictive mean and per-point variance of the latent utilities
    mustar = Kstar.transpose() * Kinv * GetVec(f, idx_global);
    Css = Kss - Kstar.transpose() * W * llt.solve(Kinv * Kstar);
    varstar = Css.diagonal();
}
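Note that `Kron` in these GPPE examples is a helper from the surrounding project whose definition is not shown in this listing. A dense sketch following the textbook definition of the Kronecker product, offered only as an assumption about its behavior:

#include <Eigen/Dense>
using Eigen::MatrixXd;

// Sketch of a dense Kronecker product: block (i, j) of the result is
// A(i, j) * B, so the result is (A.rows*B.rows) x (A.cols*B.cols).
MatrixXd Kron(const MatrixXd& A, const MatrixXd& B)
{
    MatrixXd K(A.rows() * B.rows(), A.cols() * B.cols());
    for (int i = 0; i < A.rows(); ++i)
        for (int j = 0; j < A.cols(); ++j)
            K.block(i * B.rows(), j * B.cols(), B.rows(), B.cols()) = A(i, j) * B;
    return K;
}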
Example #4
double CGppe::maximum_expected_improvement(const VectorXd& theta_t, const VectorXd& theta_x, const double& sigma,
        const MatrixXd& t, const MatrixXd& x, const VectorXd& idx_global, const VectorXd& ind_t, const VectorXd& ind_x, MatrixXd tstar, int N, double fbest)
{
    VectorXd idx_xstar = Nfirst(N);
    int Kt_ss = 1; // test-task self-covariance, fixed to 1 here
    double mei;
    MatrixXd Kx_star, Kx_star_star, kstar, Kss, Css;
    MatrixXd Kt_star = covfunc_t->Compute(t, tstar);

    Kx_star = GetMatRow(Kx, idx_xstar.transpose());                            // training-to-test input covariance
    Kx_star_star = GetMat(Kx, idx_xstar.transpose(), idx_xstar.transpose());   // test-to-test input covariance
    kstar = Kron(Kt_star, Kx_star);
    kstar = GetMatRow(kstar, idx_global);
    Kss = Kt_ss * Kx_star_star;

    // Predictive mean and per-point variance of the latent utilities
    mustar = kstar.transpose() * Kinv * GetVec(f, idx_global);
    Css    = Kss - kstar.transpose() * W * llt.solve(Kinv * kstar);
    varstar = Css.diagonal();

    VectorXd sigmastar = varstar.cwiseSqrt();
    VectorXd z = ((fbest - mustar.array()) / sigmastar.array()).matrix();
    VectorXd pdfval = normpdf(z);
    VectorXd cdfval = normcdf(z);
    VectorXd inter = (z.array() * (1 - cdfval.array())).matrix();
    VectorXd el = sigmastar.cwiseProduct(inter - pdfval);

    // Negate so el holds sigma * (phi(z) - z * (1 - Phi(z))),
    // the expected improvement over fbest for maximization
    el = -el;
    mei = el.maxCoeff();
    return mei;
}
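The vectorized block above evaluates, for each candidate, the expected improvement over `fbest` for maximization, EI = sigma * (phi(z) - z * (1 - Phi(z))) with z = (fbest - mu) / sigma. A scalar sketch of the same quantity (helper names hypothetical):

#include <cmath>

// Standard normal pdf and cdf (hypothetical scalar helpers).
double norm_pdf(double z) { return std::exp(-0.5 * z * z) / std::sqrt(2.0 * 3.14159265358979323846); }
double norm_cdf(double z) { return 0.5 * std::erfc(-z / std::sqrt(2.0)); }

// Expected improvement over fbest of a Gaussian prediction N(mu, sigma^2),
// written to match the vectorized expression in maximum_expected_improvement.
double expected_improvement(double mu, double sigma, double fbest)
{
    double z = (fbest - mu) / sigma;
    return sigma * (norm_pdf(z) - z * (1.0 - norm_cdf(z)));
}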
Example #5
int MCLR_SM::Active_Query()
{
	stop_training = false;

	// The max_info sequence (and hence the algorithm) can appear to converge
	// when the user repeatedly selects the "I am not sure" option, so whenever
	// that option is chosen (current_label == 0), drop the last info value.
	if(current_label == 0)
		max_info_vector.pop_back();

	
	max_info = -1e9;
	int active_query = 0;

	vnl_matrix<double> test_data_just_features = test_data.transpose();
	vnl_matrix<double> prob = Test_Current_Model(test_data_just_features);

	vnl_matrix<double> test_data_bias = Add_Bias(test_data_just_features);
	vnl_vector<double> info_vector(test_data_just_features.cols());

	// Compute the information gain of each candidate and keep the best one
	for(int i = 0; i < test_data_just_features.cols(); ++i)
	{
		vnl_vector<double> temp_col_prob = prob.get_column(i);
		vnl_vector<double> temp_col_data = test_data_bias.get_column(i);
		vnl_matrix<double> q = Kron(temp_col_prob, temp_col_data); // Kronecker product
		vnl_diag_matrix<double> identity_matrix(no_of_classes, 1); // identity matrix
		double infoval = (q.transpose() * m.CRB.transpose() * q).get(0, 0);
		vnl_matrix<double> info_matrix(no_of_classes, no_of_classes, infoval);
		info_matrix = identity_matrix + info_matrix;
		info_vector(i) = log(vnl_determinant(info_matrix));
		if(info_vector(i) > max_info)
		{
			active_query = i;
			max_info = info_vector(i);
		}
	}

	max_info_vector.push_back(max_info);

	// Track max_info and its change over the last three iterations
	if(max_info_vector.size() > 1)
		diff_info_3_it((max_info_vector.size() - 1) % 3) = max_info_vector.at(max_info_vector.size() - 1) - max_info_vector.at(max_info_vector.size() - 2);

	info_3_it((max_info_vector.size() - 1) % 3) = max_info;

	if(max_info_vector.size() > 3)
	{
		int sum = 0;
		for(int iter = 0; iter < 3; iter++)
		{
			if(fabs(diff_info_3_it(iter)) / fabs(info_3_it(iter)) < stop_cond(1))
			{
				sum = sum + 1;
			}
		}
		// The relative change stayed below the threshold for three consecutive
		// iterations: the model is confident enough, so training can stop
		if(sum == 3)
			stop_training = true;
	}

	return active_query;
}
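Because `info_matrix` is a constant matrix (every entry equals `infoval`), it is `infoval` times the rank-one all-ones matrix, and the matrix determinant lemma gives det(I + c * 1 * 1^T) = 1 + n * c. The log-determinant inside the loop therefore has a closed form (a simplification sketch, not the original code):

#include <cmath>

// Closed form for the information value computed in the loop above:
// log det(I + infoval * ones(n, n)) = log(1 + n * infoval),
// by the matrix determinant lemma for a rank-one update
// (valid when 1 + n * infoval > 0).
double info_value(double infoval, int no_of_classes)
{
	return std::log(1.0 + no_of_classes * infoval);
}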