FunctionApproximator* FunctionApproximatorIRFRLS::clone(void) const {

  MetaParametersIRFRLS* meta_params = NULL;
  if (getMetaParameters()!=NULL)
    meta_params = dynamic_cast<MetaParametersIRFRLS*>(getMetaParameters()->clone());

  ModelParametersIRFRLS* model_params = NULL;
  if (getModelParameters()!=NULL)
    model_params = dynamic_cast<ModelParametersIRFRLS*>(getModelParameters()->clone());

  if (meta_params==NULL)
    return new FunctionApproximatorIRFRLS(model_params);
  else
    return new FunctionApproximatorIRFRLS(meta_params,model_params);
}
FunctionApproximator* FunctionApproximatorGPR::clone(void) const {
  // All error checking and cloning is left to the FunctionApproximator constructor.
  return new FunctionApproximatorGPR(
    dynamic_cast<const MetaParametersGPR*>(getMetaParameters()),
    dynamic_cast<const ModelParametersGPR*>(getModelParameters())
  );
}
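// Hypothetical usage sketch (not part of the library): clone() is assumed to return a
// heap-allocated deep copy that the caller owns and must delete, and predict() is assumed
// to be part of the FunctionApproximator interface. The argument names are placeholders.
static void cloneUsageSketch(const FunctionApproximatorGPR& trained_fa, const MatrixXd& inputs)
{
  FunctionApproximator* fa_copy = trained_fa.clone(); // deep copy of meta- and model parameters
  MatrixXd outputs;
  fa_copy->predict(inputs, outputs); // the copy behaves like the original ...
  delete fa_copy;                    // ... but is owned by the caller
}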
void TransformationDescription::invert()
{
  // Swap the x and y value of every data point:
  for (TransformationDescription::DataPoints::iterator it = data_.begin();
       it != data_.end(); ++it)
  {
    *it = make_pair(it->second, it->first);
  }
  // ugly hack for a linear model given by explicit slope/intercept parameters
  // (no data points to swap and refit):
  if ((model_type_ == "linear") && data_.empty())
  {
    TransformationModelLinear* lm = dynamic_cast<TransformationModelLinear*>(model_);
    lm->invert();
  }
  else
  {
    Param params = getModelParameters();
    fitModel(model_type_, params);
  }
}
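// Sketch of what the linear special case amounts to (an assumption about
// TransformationModelLinear::invert(), which is not shown here): for a parametric model
// y = slope * x + intercept, the inverse mapping is x = y / slope - intercept / slope,
// so the parameters can be inverted analytically (provided slope != 0):
static void invertLinearParametersSketch(double& slope, double& intercept)
{
  intercept = -intercept / slope; // uses the original slope
  slope = 1.0 / slope;
}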
void FunctionApproximatorIRFRLS::predict(const MatrixXd& input, MatrixXd& output)
{
  if (!isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorIRFRLS::predict if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersIRFRLS* model_parameters_irfrls =
    static_cast<const ModelParametersIRFRLS*>(getModelParameters());

  // Project the inputs onto the random cosine features, then apply the linear models.
  MatrixXd proj_inputs;
  proj(input, model_parameters_irfrls->cosines_periodes_, model_parameters_irfrls->cosines_phase_, proj_inputs);
  output = proj_inputs * model_parameters_irfrls->linear_models_;
}
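// Sketch of the feature projection assumed above (the proj() implementation itself is not
// shown here): each input x is mapped onto random cosine features phi_j(x) = cos(w_j . x + p_j),
// where cosines_periodes_ is assumed to hold one random frequency vector w_j per row and
// cosines_phase_ the corresponding random phases p_j. The prediction is then the linear
// combination phi(x) * linear_models_. A minimal standalone version of such a projection:
static void projectCosineFeaturesSketch(const MatrixXd& inputs,
                                        const MatrixXd& frequencies, // n_features x n_dims
                                        const VectorXd& phases,      // n_features
                                        MatrixXd& features)          // n_samples x n_features
{
  features.noalias() = inputs * frequencies.transpose(); // one column per cosine feature
  features.rowwise() += phases.transpose();              // add the per-feature phase
  features = features.array().cos().matrix();            // element-wise cosine
}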
bool FunctionApproximatorGPR::saveGridData(const VectorXd& min, const VectorXd& max, const VectorXi& n_samples_per_dim, string save_directory, bool overwrite) const
{
  if (save_directory.empty())
    return true;

  MatrixXd inputs_grid;
  FunctionApproximator::generateInputsGrid(min, max, n_samples_per_dim, inputs_grid);

  const ModelParametersGPR* model_parameters_gpr = static_cast<const ModelParametersGPR*>(getModelParameters());

  MatrixXd activations_grid;
  model_parameters_gpr->kernelActivations(inputs_grid, activations_grid);

  saveMatrix(save_directory,"n_samples_per_dim.txt",n_samples_per_dim,overwrite);
  saveMatrix(save_directory,"inputs_grid.txt",inputs_grid,overwrite);
  saveMatrix(save_directory,"activations_grid.txt",activations_grid,overwrite);

  // Weight the basis function activations
  VectorXd weights = model_parameters_gpr->weights();
  for (int b=0; b<activations_grid.cols(); b++)
    activations_grid.col(b).array() *= weights(b);
  saveMatrix(save_directory,"activations_weighted_grid.txt",activations_grid,overwrite);

  // Sum over the weighted basis functions to get the predictions on the grid
  MatrixXd predictions_grid = activations_grid.rowwise().sum();
  saveMatrix(save_directory,"predictions_grid.txt",predictions_grid,overwrite);

  return true;
}
void FunctionApproximatorGPR::predictVariance(const MatrixXd& inputs, MatrixXd& variances)
{
  if (!isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::predictVariance if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr = static_cast<const ModelParametersGPR*>(getModelParameters());

  assert(inputs.cols()==getExpectedInputDim());
  unsigned int n_samples = inputs.rows();
  variances.resize(n_samples,1);

  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);

  double maximum_covariance = model_parameters_gpr->maximum_covariance();
  MatrixXd gram_inv = model_parameters_gpr->gram_inv();

  // Predictive variance: prior variance minus the variance explained by the training data.
  for (unsigned int ii=0; ii<n_samples; ii++)
    variances(ii) = maximum_covariance - (ks.row(ii)*gram_inv).dot(ks.row(ii).transpose());
}
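// The loop above matches the standard Gaussian-process predictive variance, assuming
// maximum_covariance() returns the prior variance k(x*,x*), kernelActivations() the
// covariances k* between a query x* and the training inputs, and gram_inv() the inverse
// of the (possibly regularized) Gram matrix K:
//   var(x*) = k(x*,x*) - k*^T K^{-1} k*
// A minimal standalone sketch of the same computation for a single query:
static double gprPredictiveVarianceSketch(double prior_variance,
                                          const VectorXd& k_star,
                                          const MatrixXd& gram_inv)
{
  return prior_variance - k_star.dot(gram_inv * k_star);
}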
void FunctionApproximatorGPR::predict(const MatrixXd& inputs, MatrixXd& outputs)
{
  if (!isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::predict if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr = static_cast<const ModelParametersGPR*>(getModelParameters());

  assert(inputs.cols()==getExpectedInputDim());
  unsigned int n_samples = inputs.rows();
  outputs.resize(n_samples,1);

  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);

  // Predictive mean: dot product of the kernel activations with the precomputed weights.
  VectorXd weights = model_parameters_gpr->weights();
  for (unsigned int ii=0; ii<n_samples; ii++)
    outputs(ii) = ks.row(ii).dot(weights);
}
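// The prediction above is the standard Gaussian-process posterior mean, assuming weights()
// returns the vector alpha = K^{-1} * y precomputed at training time:
//   mean(x*) = k*^T alpha = sum_b alpha_b * k(x*, x_b)
// This is also the decomposition written out by saveGridData(): summing the weighted
// activations row-wise yields the predictions on the grid. A minimal standalone sketch:
static double gprPredictiveMeanSketch(const VectorXd& k_star, const VectorXd& alpha)
{
  return k_star.dot(alpha);
}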