inline void MeanModel::addNewPoint(const vectord &x) { using boost::numeric::ublas::column; mFeatM.resize(mFeatM.size1(),mFeatM.size2()+1); column(mFeatM,mFeatM.size2()-1) = mMean->getFeatures(x); }
/************************************************************************************************** * Procedure * * * * Description: getSigmaPoints * * Class : UnscentedExpectedImprovement * **************************************************************************************************/ void UnscentedExpectedImprovement::getSigmaPoints(const vectord& x , const double scale , const int dim , const matrixd& matrix_noise , std::vector<vectord>& xx , std::vector<double>& w , const bool matrix_convert) { const size_t n = dim; assert(matrix_noise.size1() == n); assert(matrix_noise.size2() == n); assert(x.size() == n); matrixd px; if (matrix_convert) px = UnscentedExpectedImprovement::convertMatrixNoise(matrix_noise, scale, dim); else px = matrix_noise; // Output variable intialization xx = std::vector<vectord>(); w = std::vector<double>(); xx.push_back(x); w .push_back(scale / (dim + scale)); // Calculate query_i for (size_t col = 0; col < n; col += 1) { xx.push_back(x - boost::numeric::ublas::column(px, col)); xx.push_back(x + boost::numeric::ublas::column(px, col)); w .push_back(0.5 / (dim + scale)); w .push_back(0.5 / (dim + scale)); } }
/**************************************************************************************************
 *  Procedure                                                                                     *
 *                                                                                                *
 *  Description: isDiag                                                                           *
 *  Class      : UnscentedExpectedImprovement                                                     *
 **************************************************************************************************/
// Returns true iff the matrix is square and every off-diagonal entry is
// zero up to machine epsilon. Non-square matrices are never diagonal.
// NOTE(review): the parameter is taken by value (full matrix copy) — the
// declaration elsewhere presumably matches, so the signature is kept as-is.
bool UnscentedExpectedImprovement::isDiag(matrixd matrix)
{
    if (matrix.size1() != matrix.size2())
    {
        return false;
    }

    const double tol = std::numeric_limits<double>::epsilon();
    for (size_t row = 0; row < matrix.size1(); ++row)
    {
        for (size_t col = 0; col < matrix.size2(); ++col)
        {
            const bool offDiagonal = (row != col);
            if (offDiagonal && std::abs(matrix(row, col)) > tol)
            {
                return false;
            }
        }
    }
    return true;
}
/**************************************************************************************************
 *  Procedure                                                                                     *
 *                                                                                                *
 *  Description: convertMatrixToParams                                                            *
 *  Class      : UnscentedExpectedImprovement                                                     *
 **************************************************************************************************/
// Flattens the square matrix px (row-major) into the parameter struct:
// crit_params starting at offset 4 and input.noise starting at offset 0.
// Silently does nothing when px is not square.
// NOTE(review): offsets 4 and 0 encode the bopt_params layout — confirm
// against the struct definition if that layout ever changes.
void UnscentedExpectedImprovement::convertMatrixToParams(bopt_params& params, const matrixd px)
{
    if (px.size1() != px.size2()) return;

    const size_t dim = px.size1();
    size_t flat = 0;  // row-major flat index: col + row * dim
    for (size_t row = 0; row < dim; ++row)
    {
        for (size_t col = 0; col < dim; ++col)
        {
            const double value = px(row, col);
            params.crit_params[4 + flat] = value;
            params.input.noise[flat]     = value;
            ++flat;
        }
    }
}
// Fills corrMatrix with the kernel Gram matrix of the sample set XX,
// exploiting symmetry (each off-diagonal pair is evaluated once) and
// adding the nugget term to the diagonal for numerical regularization.
// corrMatrix must already be sized XX.size() x XX.size().
void KernelModel::computeCorrMatrix(const vecOfvec& XX, matrixd& corrMatrix, double nugget)
{
  const size_t nSamples = XX.size();
  assert(corrMatrix.size1() == nSamples);
  assert(corrMatrix.size2() == nSamples);

  for (size_t ii = 0; ii < nSamples; ++ii)
    {
      // Diagonal entry carries the nugget.
      corrMatrix(ii, ii) = (*mKernel)(XX[ii], XX[ii]) + nugget;

      // Strict lower triangle, mirrored into the upper triangle.
      for (size_t jj = 0; jj < ii; ++jj)
        {
          const double kval = (*mKernel)(XX[ii], XX[jj]);
          corrMatrix(ii, jj) = kval;
          corrMatrix(jj, ii) = kval;
        }
    }
}
// Fills corrMatrix with the gradient of the kernel Gram matrix with respect
// to the dth_index-th kernel hyperparameter, using symmetry so each
// off-diagonal pair is evaluated once. No nugget is added here.
// corrMatrix must already be sized XX.size() x XX.size().
void KernelModel::computeDerivativeCorrMatrix(const vecOfvec& XX, matrixd& corrMatrix, int dth_index)
{
  const size_t nSamples = XX.size();
  assert(corrMatrix.size1() == nSamples);
  assert(corrMatrix.size2() == nSamples);

  for (size_t ii = 0; ii < nSamples; ++ii)
    {
      corrMatrix(ii, ii) = mKernel->gradient(XX[ii], XX[ii], dth_index);

      for (size_t jj = 0; jj < ii; ++jj)
        {
          const double gval = mKernel->gradient(XX[ii], XX[jj], dth_index);
          corrMatrix(ii, jj) = gval;
          corrMatrix(jj, ii) = gval;
        }
    }
}