void FunctionApproximatorGPR::predictVariance(const MatrixXd& inputs, MatrixXd& variances)
{
  if (!isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::predictVariance if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr =
    static_cast<const ModelParametersGPR*>(getModelParameters());

  assert(inputs.cols()==getExpectedInputDim());
  unsigned int n_samples = inputs.rows();
  variances.resize(n_samples,1);

  // Kernel activations between each query input and the training inputs.
  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);

  double maximum_covariance = model_parameters_gpr->maximum_covariance();
  MatrixXd gram_inv = model_parameters_gpr->gram_inv();

  for (unsigned int ii=0; ii<n_samples; ii++)
    variances(ii) = maximum_covariance - (ks.row(ii)*gram_inv).dot(ks.row(ii).transpose());
}
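// For reference: the loop above evaluates the standard GP predictive variance
//
//   var(x*) = k(x*,x*) - k*^T K^{-1} k*
//
// where k(x*,x*) equals maximum_covariance for this kernel, k* (= ks.row(ii))
// holds the kernel activations between the query x* and the training inputs,
// and K^{-1} (= gram_inv) is the precomputed inverse of the Gram matrix.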
void FunctionApproximatorGPR::train(const MatrixXd& inputs, const MatrixXd& targets)
{
  if (isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::train more than once. Doing nothing." << endl;
    cerr << "  (if you really want to retrain, call the reTrain function instead)" << endl;
    return;
  }

  assert(inputs.rows() == targets.rows()); // Must have the same number of examples
  assert(inputs.cols()==getExpectedInputDim());

  const MetaParametersGPR* meta_parameters_gpr =
    dynamic_cast<const MetaParametersGPR*>(getMetaParameters());
  assert(meta_parameters_gpr!=NULL); // dynamic_cast yields NULL if the meta-parameters have the wrong type

  double max_covar = meta_parameters_gpr->maximum_covariance();
  VectorXd sigmas = meta_parameters_gpr->sigmas();

  // Compute the Gram matrix.
  // In a Gram matrix, every input point is itself a center.
  MatrixXd centers = inputs;
  // Replicate sigmas, because they are the same for each data point/center.
  MatrixXd widths = sigmas.transpose().colwise().replicate(centers.rows());

  MatrixXd gram(inputs.rows(),inputs.rows());
  bool normalize_activations = false;
  bool asymmetric_kernels = false;
  BasisFunction::Gaussian::activations(centers,widths,inputs,gram,
    normalize_activations,asymmetric_kernels);

  gram *= max_covar;

  setModelParameters(new ModelParametersGPR(inputs,targets,gram,max_covar,sigmas));
}
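// A minimal usage sketch of the GPR function approximator. The constructor
// signatures assumed below (MetaParametersGPR taking the input dimensionality,
// the maximum covariance and per-dimension kernel widths; FunctionApproximatorGPR
// taking a pointer to the meta-parameters) are inferred from how they are used
// above, and may differ from the actual headers. The hyper-parameter values are
// hypothetical.
void exampleTrainAndPredictGPR(const MatrixXd& inputs, const MatrixXd& targets)
{
  // One kernel width per input dimension, and the maximum covariance k(x,x).
  VectorXd sigmas = VectorXd::Constant(inputs.cols(), 0.25);
  double max_covar = 10.0;

  MetaParametersGPR meta_parameters(inputs.cols(), max_covar, sigmas);
  FunctionApproximatorGPR fa(&meta_parameters);

  fa.train(inputs, targets);

  MatrixXd outputs, variances;
  fa.predict(inputs, outputs);            // posterior mean, one row per sample
  fa.predictVariance(inputs, variances);  // posterior variance, one row per sample
}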
MetaParametersLWPR* MetaParametersLWPR::clone(void) const
{
  return new MetaParametersLWPR(
    getExpectedInputDim(),
    init_D_, w_gen_, w_prune_,
    update_D_, init_alpha_, penalty_, diag_only_,
    use_meta_, meta_rate_, kernel_name_
  );
}
ModelParametersGMR::ModelParametersGMR(std::vector<double> priors,
  std::vector<Eigen::VectorXd> means_x, std::vector<Eigen::VectorXd> means_y,
  std::vector<Eigen::MatrixXd> covars_x, std::vector<Eigen::MatrixXd> covars_y,
  std::vector<Eigen::MatrixXd> covars_y_x)
:
  priors_(priors),
  means_x_(means_x), means_y_(means_y),
  covars_x_(covars_x), covars_y_(covars_y), covars_y_x_(covars_y_x)
{
  size_t n_gaussians = priors.size();

#ifndef NDEBUG // Check for NDEBUG to avoid 'unused variable' warnings for n_dims_in and n_dims_out.
  assert(n_gaussians>0);
  assert(means_x_.size() == n_gaussians);
  assert(means_y_.size() == n_gaussians);
  assert(covars_x_.size() == n_gaussians);
  assert(covars_y_.size() == n_gaussians);
  assert(covars_y_x_.size() == n_gaussians);

  int n_dims_in = getExpectedInputDim();
  for (size_t i = 0; i < n_gaussians; i++)
  {
    assert(means_x_[i].size() == n_dims_in);
    assert(covars_x_[i].rows() == n_dims_in);
    assert(covars_x_[i].cols() == n_dims_in);
    assert(covars_y_x_[i].cols() == n_dims_in);
  }

  int n_dims_out = means_y_[0].size();
  for (size_t i = 0; i < n_gaussians; i++)
  {
    assert(covars_y_[i].rows() == n_dims_out);
    assert(covars_y_[i].cols() == n_dims_out);
    assert(covars_y_x_[i].rows() == n_dims_out);
  }
#endif

  // Precompute the inverse input covariances, used at every prediction.
  for (unsigned int i=0; i<n_gaussians; i++)
    covars_x_inv_.push_back(covars_x_[i].inverse());

  all_values_vector_size_ = 0;

  // NEW REPRESENTATION
  // all_values_vector_size_ += n_gaussians;
  // all_values_vector_size_ += n_gaussians * n_dims_in;
  // all_values_vector_size_ += n_gaussians * n_dims_out;
  // all_values_vector_size_ += n_gaussians * n_dims_in * n_dims_in;
  // all_values_vector_size_ += n_gaussians * n_dims_out * n_dims_out;
  // all_values_vector_size_ += n_gaussians * n_dims_out * n_dims_in;
}
void FunctionApproximatorIRFRLS::train(const MatrixXd& inputs, const MatrixXd& targets)
{
  if (isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorIRFRLS::train more than once. Doing nothing." << endl;
    cerr << "  (if you really want to retrain, call the reTrain function instead)" << endl;
    return;
  }

  assert(inputs.rows() == targets.rows()); // Must have the same number of examples
  assert(inputs.cols()==getExpectedInputDim());

  const MetaParametersIRFRLS* meta_parameters_irfrls =
    static_cast<const MetaParametersIRFRLS*>(getMetaParameters());

  int nb_cos = meta_parameters_irfrls->number_of_basis_functions_;

  // Init random generator.
  boost::mt19937 rng(getpid() + time(0));

  // Draw periods.
  boost::normal_distribution<> twoGamma(0, sqrt(2 * meta_parameters_irfrls->gamma_));
  boost::variate_generator<boost::mt19937&, boost::normal_distribution<> > genPeriods(rng, twoGamma);
  MatrixXd cosines_periodes(nb_cos, inputs.cols());
  for (int r = 0; r < nb_cos; r++)
    for (int c = 0; c < inputs.cols(); c++)
      cosines_periodes(r, c) = genPeriods();

  // Draw phases.
  boost::uniform_real<> twoPi(0, 2 * boost::math::constants::pi<double>());
  boost::variate_generator<boost::mt19937&, boost::uniform_real<> > genPhases(rng, twoPi);
  VectorXd cosines_phase(nb_cos);
  for (int r = 0; r < nb_cos; r++)
    cosines_phase(r) = genPhases();

  MatrixXd proj_inputs;
  proj(inputs, cosines_periodes, cosines_phase, proj_inputs);

  // Compute the linear model analytically (regularized least squares).
  double lambda = meta_parameters_irfrls->lambda_;
  MatrixXd toInverse = lambda * MatrixXd::Identity(nb_cos, nb_cos)
    + proj_inputs.transpose() * proj_inputs;
  VectorXd linear_model = toInverse.inverse() * (proj_inputs.transpose() * targets);

  setModelParameters(new ModelParametersIRFRLS(linear_model, cosines_periodes, cosines_phase));
}
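// In equations, train() fits random Fourier features (cf. Rahimi & Recht):
// each of the nb_cos features is
//
//   phi_j(x) = cos(omega_j^T x + b_j),  omega_j ~ N(0, 2*gamma*I),  b_j ~ U[0, 2*pi],
//
// and the linear model solves the ridge regression
//
//   w = (lambda*I + Phi^T Phi)^{-1} Phi^T targets,
//
// where Phi (= proj_inputs, the cosine projections computed by proj()) is the
// N x nb_cos matrix of projected inputs.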
void FunctionApproximatorGPR::predict(const MatrixXd& inputs, MatrixXd& outputs)
{
  if (!isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::predict if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr =
    static_cast<const ModelParametersGPR*>(getModelParameters());

  assert(inputs.cols()==getExpectedInputDim());
  unsigned int n_samples = inputs.rows();
  outputs.resize(n_samples,1);

  // Kernel activations between each query input and the training inputs.
  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);

  VectorXd weights = model_parameters_gpr->weights();
  for (unsigned int ii=0; ii<n_samples; ii++)
    outputs(ii) = ks.row(ii).dot(weights);
}
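// The prediction above is the GP posterior mean y(x*) = k*^T w, where the
// weight vector w is presumably precomputed as K^{-1} * targets when the model
// parameters are built, so each prediction reduces to a single dot product
// with the kernel activations k*.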
void UnifiedModel::getParameterVectorAll(VectorXd& values) const
{
  values.resize(getParameterVectorAllSize());
  int offset = 0;

  int n_basis_functions = centers_.size();
  int n_dims = getExpectedInputDim();

  for (int i_bfs=0; i_bfs<n_basis_functions; i_bfs++)
  {
    values.segment(offset,n_dims) = centers_[i_bfs];            offset += n_dims;
    values.segment(offset,n_dims) = covars_[i_bfs].diagonal();  offset += n_dims;
    values.segment(offset,n_dims) = slopes_[i_bfs];             offset += n_dims;
    values[offset] = offsets_[i_bfs];                           offset += 1;
    values[offset] = priors_[i_bfs];                            offset += 1;
  }

  /* Dead code. But kept in for reference in case slopes_as_angles_ will be implemented.

  VectorXd cur_slopes;
  for (int i_dim=0; i_dim<slopes_.cols(); i_dim++)
  {
    cur_slopes = slopes_.col(i_dim);
    if (slopes_as_angles_)
    {
      // cur_slopes is a slope, but the values vector expects the angle with the
      // x-axis. Do the conversion here.
      for (int ii=0; ii<cur_slopes.size(); ii++)
        cur_slopes[ii] = atan2(cur_slopes[ii],1.0);
    }

    values.segment(offset,slopes_.rows()) = cur_slopes;
    offset += slopes_.rows();
  }

  */

  assert(offset == getParameterVectorAllSize());
}
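// The resulting layout of the values vector is, per basis function:
//
//   [ center (n_dims) | covar diagonal (n_dims) | slope (n_dims) | offset (1) | prior (1) ]
//
// so getParameterVectorAllSize() should equal n_basis_functions * (3*n_dims + 2).
// setParameterVectorAll() below reads the vector back in the same order.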
void FunctionApproximator::train(const MatrixXd& inputs, const MatrixXd& targets, string save_directory, bool overwrite)
{
  train(inputs,targets);

  if (save_directory.empty())
    return;

  if (!isTrained())
    return;

  if (getExpectedInputDim()<3)
  {
    VectorXd min = inputs.colwise().minCoeff();
    VectorXd max = inputs.colwise().maxCoeff();

    int n_samples_per_dim = 100;
    if (getExpectedInputDim()==2) n_samples_per_dim = 40;
    VectorXi n_samples_per_dim_vec = VectorXi::Constant(getExpectedInputDim(),n_samples_per_dim);

    model_parameters_->saveGridData(min, max, n_samples_per_dim_vec, save_directory, overwrite);
  }

  MatrixXd outputs;
  predict(inputs,outputs);

  saveMatrix(save_directory,"inputs.txt",inputs,overwrite);
  saveMatrix(save_directory,"targets.txt",targets,overwrite);
  saveMatrix(save_directory,"outputs.txt",outputs,overwrite);

  string filename = save_directory+"/plotdata.py";
  ofstream outfile;
  outfile.open(filename.c_str());
  if (!outfile.is_open())
  {
    cerr << __FILE__ << ":" << __LINE__ << ":";
    cerr << "Could not open file " << filename << " for writing." << endl;
  }
  else
  {
    // Python code generation in C++. Rock 'n' roll! ;-)
    if (inputs.cols()==2)
      outfile << "from mpl_toolkits.mplot3d import Axes3D \n";
    outfile << "import numpy \n";
    outfile << "import matplotlib.pyplot as plt \n";
    outfile << "directory = '" << save_directory << "' \n";
    outfile << "inputs = numpy.loadtxt(directory+'/inputs.txt') \n";
    outfile << "targets = numpy.loadtxt(directory+'/targets.txt') \n";
    outfile << "outputs = numpy.loadtxt(directory+'/outputs.txt') \n";
    outfile << "fig = plt.figure() \n";
    if (inputs.cols()==2)
    {
      outfile << "ax = Axes3D(fig) \n";
      outfile << "ax.plot(inputs[:,0],inputs[:,1],targets, '.', label='targets',color='black') \n";
      outfile << "ax.plot(inputs[:,0],inputs[:,1],outputs, '.', label='predictions',color='red')\n";
      outfile << "ax.set_xlabel('input_1'); ax.set_ylabel('input_2'); ax.set_zlabel('output') \n";
      outfile << "ax.legend(loc='lower right') \n";
    }
    else
    {
      outfile << "plt.plot(inputs,targets, '.', label='targets',color='black') \n";
      outfile << "plt.plot(inputs,outputs, '.', label='predictions',color='red') \n";
      outfile << "plt.xlabel('input'); plt.ylabel('output'); \n";
      outfile << "plt.legend(loc='lower right') \n";
    }
    outfile << "plt.show() \n";
    outfile << endl;

    outfile.close();

    //cout << "  ______________________________________________________________" << endl;
    //cout << "  | Plot saved data with:" << " 'python " << filename << "'." << endl;
    //cout << "  |______________________________________________________________" << endl;
  }
}
void UnifiedModel::setParameterVectorAll(const VectorXd& values)
{
  if (all_values_vector_size_ != values.size())
  {
    cerr << __FILE__ << ":" << __LINE__ << ": values is of wrong size." << endl;
    return;
  }

  int offset = 0;
  int n_basis_functions = centers_.size();
  int n_dims = getExpectedInputDim();

  for (int i_bfs=0; i_bfs<n_basis_functions; i_bfs++)
  {
    // If the centers change, the cache for normalizedKernelActivations() must be cleared,
    // because this function will return different values for different centers.
    VectorXd cur_center = values.segment(offset,n_dims);
    if ( !(centers_[i_bfs].array() == cur_center.array()).all() )
      clearCache();
    centers_[i_bfs] = values.segment(offset,n_dims);
    offset += n_dims;

    // If the widths change, the cache for normalizedKernelActivations() must be cleared,
    // because this function will return different values for different widths.
    VectorXd cur_width = values.segment(offset,n_dims);
    if ( !(covars_[i_bfs].diagonal().array() == cur_width.array()).all() )
      clearCache();
    covars_[i_bfs].diagonal() = values.segment(offset,n_dims);
    offset += n_dims;

    // Cache must not be cleared, because normalizedKernelActivations() returns the same values.
    slopes_[i_bfs] = values.segment(offset,n_dims);
    offset += n_dims;

    offsets_[i_bfs] = values[offset];
    offset += 1;

    priors_[i_bfs] = values[offset];
    offset += 1;
  }

  /*
  int offset = 0;
  int size = centers_.rows();
  int n_dims = centers_.cols();
  for (int i_dim=0; i_dim<n_dims; i_dim++)
  {
    // If the centers change, the cache for normalizedKernelActivations() must be cleared,
    // because this function will return different values for different centers
    if ( !(centers_.col(i_dim).array() == values.segment(offset,size).array()).all() )
      clearCache();

    centers_.col(i_dim) = values.segment(offset,size);
    offset += size;
  }
  for (int i_dim=0; i_dim<n_dims; i_dim++)
  {
    // If the centers change, the cache for normalizedKernelActivations() must be cleared,
    // because this function will return different values for different centers
    if ( !(covars_.col(i_dim).array() == values.segment(offset,size).array()).all() )
      clearCache();

    covars_.col(i_dim) = values.segment(offset,size);
    offset += size;
  }

  offsets_ = values.segment(offset,size);
  offset += size;

  // Cache must not be cleared, because normalizedKernelActivations() returns the same values.
  MatrixXd old_slopes = slopes_;
  for (int i_dim=0; i_dim<n_dims; i_dim++)
  {
    slopes_.col(i_dim) = values.segment(offset,size);
    offset += size;
    // Cache must not be cleared, because normalizedKernelActivations() returns the same values.
  }
  */

  assert(offset == getParameterVectorAllSize());
}