Example #1
		virtual double operator() (double x)
		{
			Eigen::Map<Eigen::VectorXd> eigen_f(f->vector, f->vlen);
			Eigen::Map<Eigen::VectorXd> eigen_m(m->vector, m->vlen);

			// Move along the search direction: alpha = start_alpha + x*dalpha
			*alpha = start_alpha + x*(*dalpha);

			// Recompute the latent function values: f = K*alpha*scale^2 + m
			eigen_f = (*K)*(*alpha)*scale*scale + eigen_m;

			// Copy the Eigen result back into the SGVector f
			for (index_t i = 0; i < eigen_f.rows(); i++)
				(*f)[i] = eigen_f[i];

			// First and second derivatives of log p(y|f) w.r.t. f
			*dl1 = lik->get_log_probability_derivative_f(lab, *f, 1);
			*mW = lik->get_log_probability_derivative_f(lab, *f, 2);

			// Objective value: psi = alpha^T (f - m)/2 - log p(y|f)
			float64_t result = (*alpha).dot(eigen_f - eigen_m)/2.0;

			// Negate the second derivative to obtain W
			for (index_t i = 0; i < (*mW).vlen; i++)
				(*mW)[i] = -(*mW)[i];

			result -= lik->get_log_probability_f(lab, *f);

			return result;
		}
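For reference, under the names used in the snippet this functor evaluates the Laplace-approximation objective along a line-search direction; a sketch in standard GP notation, assuming scale plays the role of the kernel scaling sigma:

	\alpha(x) = \alpha_0 + x\,\Delta\alpha, \qquad
	f(x) = \sigma^2 K\,\alpha(x) + m, \qquad
	\psi(x) = \tfrac{1}{2}\,\alpha(x)^\top\bigl(f(x) - m\bigr) - \log p\bigl(y \mid f(x)\bigr)

with alpha_0 = start_alpha and Delta alpha = dalpha; the negated second derivative stored in mW corresponds to the usual W matrix diagonal of the Laplace approximation.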
Example #2
SGVector<float64_t> CGaussianProcessRegression::get_mean_vector()
{
	SGVector<float64_t> m_alpha = m_method->get_alpha();

	CMeanFunction* mean_function = m_method->get_mean();

	if (!mean_function)
		SG_ERROR("Mean function is NULL!\n");

	SGMatrix<float64_t> features = ((CDotFeatures*)m_data)->
			get_computed_dot_feature_matrix();

	SGVector<float64_t> means = mean_function->get_mean_vector(features);

	SGVector<float64_t> result_vector(features.num_cols);

	// Here we multiply K*^t by alpha to receive the mean predictions.
	cblas_dgemv(CblasColMajor, CblasTrans, m_k_trts.num_rows,
		    m_alpha.vlen, 1.0, m_k_trts.matrix,
		    m_k_trts.num_cols, m_alpha.vector, 1, 0.0,
		    result_vector.vector, 1);

	// Add the prior mean of each test point.
	for (index_t i = 0; i < result_vector.vlen; i++)
		result_vector[i] += means[i];

	// Release the reference obtained from get_mean().
	SG_UNREF(mean_function);

	// Map the latent means through the likelihood model.
	CLikelihoodModel* lik = m_method->get_model();

	result_vector = lik->evaluate_means(result_vector);

	SG_UNREF(lik);

	return result_vector;
}
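In GP notation, the function above computes the predictive means of the latent function and then pushes them through the likelihood; a sketch, assuming m_k_trts stores the train-test kernel matrix K(X, X_*):

	\mu_* = m(X_*) + K(X, X_*)^\top \alpha, \qquad
	\mathbb{E}[y_*] = \text{evaluate\_means}(\mu_*)

where m(.) is the mean function and alpha is the weight vector returned by get_alpha().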
Example #3
SGVector<float64_t> CGaussianProcessRegression::getMeanVector()
{
	SGVector<float64_t> m_alpha = m_method->get_alpha();

	SGVector<float64_t> result_vector(m_labels->get_num_labels());

	// Here we multiply K*^t by alpha to receive the mean predictions.
	cblas_dgemv(CblasColMajor, CblasTrans, m_k_trts.num_rows,
		    m_alpha.vlen, 1.0, m_k_trts.matrix,
		    m_k_trts.num_cols, m_alpha.vector, 1, 0.0,
		    result_vector.vector, 1);

	// Map the latent means through the likelihood model.
	CLikelihoodModel* lik = m_method->get_model();

	result_vector = lik->evaluate_means(result_vector);

	SG_UNREF(lik);

	return result_vector;
}
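The cblas_dgemv call in both mean-vector variants is just a transposed matrix-vector product. A minimal, self-contained Eigen sketch of the same computation (hypothetical names and dimensions, not Shogun API):

	#include <Eigen/Dense>

	// result = K_trts^T * alpha: one dot product per test point,
	// mirroring the CblasTrans dgemv call above.
	Eigen::VectorXd latent_means(const Eigen::MatrixXd& K_trts, // n_train x n_test
	                             const Eigen::VectorXd& alpha)  // n_train
	{
		return K_trts.transpose() * alpha; // length n_test
	}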
Example #4
SGVector<float64_t> CGaussianProcessRegression::getCovarianceVector()
{
	if (!m_data)
		SG_ERROR("No testing features!\n");

	SGVector<float64_t> diagonal = m_method->get_diagonal_vector();
	SGVector<float64_t> diagonal2(m_data->get_num_vectors());

	SGMatrix<float64_t> temp1(m_data->get_num_vectors(), diagonal.vlen);

	SGMatrix<float64_t> m_L = m_method->get_cholesky();

	SGMatrix<float64_t> temp2(m_L.num_rows, m_L.num_cols);

	// Scale the columns of m_k_trts by the entries of the diagonal vector.
	for (index_t i = 0; i < diagonal.vlen; i++)
	{
		for (index_t j = 0; j < m_data->get_num_vectors(); j++)
			temp1(j,i) = diagonal[i]*m_k_trts(j,i);
	}

	for (index_t i = 0; i < diagonal2.vlen; i++)
		diagonal2[i] = 0;

	// Work on a transposed copy of the Cholesky factor L.
	memcpy(temp2.matrix, m_L.matrix,
			m_L.num_cols*m_L.num_rows*sizeof(float64_t));

	temp2.transpose_matrix(temp2.matrix, temp2.num_rows, temp2.num_cols);

	SGVector<int32_t> ipiv(temp2.num_cols);

	// Solve L^T V = K(Train,Test)*Diagonal Vector Entries for V
	clapack_dgetrf(CblasColMajor, temp2.num_rows, temp2.num_cols,
			temp2.matrix, temp2.num_cols, ipiv.vector);

	clapack_dgetrs(CblasColMajor, CblasNoTrans,
	                   temp2.num_rows, temp1.num_cols, temp2.matrix,
	                   temp2.num_cols, ipiv.vector, temp1.matrix,
	                   temp1.num_cols);

	// Square the entries of V element-wise.
	for (index_t i = 0; i < temp1.num_rows; i++)
	{
		for (index_t j = 0; j < temp1.num_cols; j++)
			temp1(i,j) = temp1(i,j)*temp1(i,j);
	}

	// Sum the squares down each column of V.
	for (index_t i = 0; i < temp1.num_cols; i++)
	{
		diagonal2[i] = 0;

		for (index_t j = 0; j < temp1.num_rows; j++)
			diagonal2[i] += temp1(j,i);
	}

	SGVector<float64_t> result(m_k_tsts.num_cols);

	// Subtract V from K(Test,Test) to get covariances.
	for (index_t i = 0; i < m_k_tsts.num_cols; i++)
		result[i] = m_k_tsts(i,i) - diagonal2[i];

	// Map the latent variances through the likelihood model before releasing it.
	CLikelihoodModel* lik = m_method->get_model();

	result = lik->evaluate_variances(result);

	SG_UNREF(lik);

	return result;
}
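The solve-and-sum steps above mirror the standard Cholesky-based variance computation used in GPML-style implementations; a sketch, assuming the diagonal vector corresponds to W^{1/2} and L is the factor returned by get_cholesky():

	V = L^{-\top}\bigl(W^{1/2} \odot K(X, X_*)\bigr), \qquad
	\operatorname{Var}[f_*] = \operatorname{diag}\bigl(K(X_*, X_*)\bigr) - \operatorname{diag}\bigl(V^\top V\bigr)

which evaluate_variances then maps to predictive variances of the observations.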