void Split(const arma::Mat<T>& input,
           const arma::Row<U>& inputLabel,
           arma::Mat<T>& trainData,
           arma::Mat<T>& testData,
           arma::Row<U>& trainLabel,
           arma::Row<U>& testLabel,
           const double testRatio)
{
  // Determine the number of test and training points.
  const size_t testSize = static_cast<size_t>(input.n_cols * testRatio);
  const size_t trainSize = input.n_cols - testSize;

  trainData.set_size(input.n_rows, trainSize);
  testData.set_size(input.n_rows, testSize);
  trainLabel.set_size(trainSize);
  testLabel.set_size(testSize);

  // Generate a random permutation of the column indices.
  const arma::Col<size_t> order =
      arma::shuffle(arma::linspace<arma::Col<size_t>>(0, input.n_cols - 1,
                                                      input.n_cols));

  // Copy the first trainSize shuffled columns into the training set...
  for (size_t i = 0; i != trainSize; ++i)
  {
    trainData.col(i) = input.col(order[i]);
    trainLabel(i) = inputLabel(order[i]);
  }

  // ...and the remaining columns into the test set.
  for (size_t i = 0; i != testSize; ++i)
  {
    testData.col(i) = input.col(order[i + trainSize]);
    testLabel(i) = inputLabel(order[i + trainSize]);
  }
}
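// Hedged usage sketch for Split() above, assuming it is instantiated as a
// template over the element type T and label type U (the template header is
// not shown in the snippet). The data, label values, and function name
// splitExample are made up for illustration: hold out 20% of a randomly
// generated 5 x 100 dataset for testing.
void splitExample()
{
  arma::mat dataset(5, 100, arma::fill::randu);
  arma::Row<size_t> labels(100);
  for (size_t i = 0; i < labels.n_elem; ++i)
    labels(i) = i % 3;  // three artificial classes

  arma::mat trainData, testData;
  arma::Row<size_t> trainLabels, testLabels;
  Split(dataset, labels, trainData, testData, trainLabels, testLabels, 0.2);
  // trainData is now 5 x 80 and testData is 5 x 20, with labels shuffled
  // consistently with their columns.
}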
arma::Col<double> compute_column_rms(const arma::Mat<double>& data)
{
  const long n_cols = data.n_cols;
  arma::Col<double> rms(n_cols);
  for (long i = 0; i < n_cols; ++i)
  {
    // Root-mean-square of the column, using an (n - 1) denominator.
    const double dot = arma::dot(data.col(i), data.col(i));
    rms(i) = std::sqrt(dot / (data.col(i).n_rows - 1));
  }
  return rms;
}
void enforce_positive_sign_by_column(arma::Mat<double>& data)
{
  // Flip the sign of any column whose entry of largest magnitude is negative,
  // so every column ends up with a positive dominant component.
  for (long i = 0; i < long(data.n_cols); ++i)
  {
    const double max = arma::max(data.col(i));
    const double min = arma::min(data.col(i));
    bool change_sign = false;
    if (std::abs(max) >= std::abs(min))
    {
      if (max < 0)
        change_sign = true;
    }
    else
    {
      if (min < 0)
        change_sign = true;
    }
    if (change_sign)
      data.col(i) *= -1;
  }
}
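// Hypothetical check for enforce_positive_sign_by_column() above (the matrix
// values and the function name are made up for illustration): the second
// column's dominant-magnitude entry (-3.0) is negative, so that column is
// flipped; the first column is left untouched.
void enforce_positive_sign_example()
{
  arma::Mat<double> m = { { 1.0, -3.0 },
                          { 2.0,  1.5 } };
  enforce_positive_sign_by_column(m);
  // m is now { { 1.0, 3.0 }, { 2.0, -1.5 } }.
}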
void calculateJacobian(const arma::Mat<std::complex<double> >& myOffsets,
                       arma::Mat<double>& myJacobian,
                       arma::Col<std::complex<double> >& myTargetsCalculated,
                       arma::Col<std::complex<double> >& myCurrentGuess,
                       void myCalculateDependentVariables(const arma::Mat<std::complex<double> >&,
                                                          const arma::Col<std::complex<double> >&,
                                                          arma::Col<std::complex<double> >&))
{
  // Calculate a temporary, unperturbed target evaluation, as needed by the
  // update formula for the new guess.
  arma::Col<std::complex<double> > unperturbedTargetsCalculated(NUMDIMENSIONS);
  unperturbedTargetsCalculated.fill(0.0);
  myCalculateDependentVariables(myOffsets, myCurrentGuess, unperturbedTargetsCalculated);

  std::complex<double> oldGuessValue(0.0, 0.0);

  // Each iteration fills one column of the Jacobian, which takes this form:
  //
  //   dF0/dx0  dF0/dx1
  //   dF1/dx0  dF1/dx1
  //
  for (int j = 0; j < NUMDIMENSIONS; j++)
  {
    // Store the old element value, then perturb the current value along the
    // imaginary axis (complex-step differentiation).
    oldGuessValue = myCurrentGuess[j];
    myCurrentGuess[j] += std::complex<double>(0.0, PROBEDISTANCE);

    // Evaluate the functions for the perturbed guess.
    myCalculateDependentVariables(myOffsets, myCurrentGuess, myTargetsCalculated);

    // The Jacobian column belonging to the perturbed independent variable is
    // the imaginary part of the perturbed targets divided by the probe
    // distance; arma::Col lets this be expressed as a single vector operation.
    myJacobian.col(j) = arma::imag(myTargetsCalculated);
    myJacobian.col(j) *= pow(PROBEDISTANCE, -1.0);

    // Restore the unperturbed guess element.
    myCurrentGuess[j] = oldGuessValue;
  }

  // Reset to the unperturbed targets, so we don't waste a function evaluation.
  myTargetsCalculated = unperturbedTargetsCalculated;
}
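// A minimal, hypothetical dependent-variable function that could be passed to
// calculateJacobian() above (the name exampleDependentVariables is made up, and
// NUMDIMENSIONS == 2 is assumed). It ignores the offsets matrix; the toy system
// is F0 = x0^2 + x1 and F1 = x0 * x1, whose analytic Jacobian is
//   [ 2*x0  1 ]
//   [  x1  x0 ]
// which the complex-step probe in calculateJacobian() reproduces essentially
// exactly.
void exampleDependentVariables(const arma::Mat<std::complex<double> >& /* offsets */,
                               const arma::Col<std::complex<double> >& guess,
                               arma::Col<std::complex<double> >& targets)
{
  targets(0) = guess(0) * guess(0) + guess(1);
  targets(1) = guess(0) * guess(1);
}

// Possible call site (again a made-up name; NUMDIMENSIONS and PROBEDISTANCE are
// assumed to be defined elsewhere, as in calculateJacobian() itself):
void exampleJacobianUsage()
{
  arma::Mat<std::complex<double> > offsets;  // unused by the toy system
  arma::Mat<double> jacobian(NUMDIMENSIONS, NUMDIMENSIONS);
  arma::Col<std::complex<double> > targetsCalculated(NUMDIMENSIONS);
  arma::Col<std::complex<double> > guess(NUMDIMENSIONS, arma::fill::ones);

  calculateJacobian(offsets, jacobian, targetsCalculated, guess, exampleDependentVariables);
  // For guess = (1, 1), jacobian should now hold [ 2 1 ; 1 1 ].
}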
void Backward(const arma::Cube<eT>& input,
              const arma::Mat<eT>& gy,
              arma::Cube<eT>& g)
{
  // Generate a cube from the backpropagated error matrix.
  arma::Cube<eT> mappedError = arma::zeros<arma::Cube<eT> >(input.n_rows,
      input.n_cols, input.n_slices);

  for (size_t s = 0, j = 0; s < mappedError.n_slices; s += gy.n_cols, j++)
  {
    for (size_t i = 0; i < gy.n_cols; i++)
    {
      arma::Col<eT> temp = gy.col(i).subvec(
          j * input.n_rows * input.n_cols,
          (j + 1) * input.n_rows * input.n_cols - 1);

      mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
          input.n_rows, input.n_cols);
    }
  }

  arma::Cube<eT> derivative;
  Deriv(input, derivative);
  g = mappedError % derivative;
}
inline ParallelKinematicMachine3PUPS::ParallelKinematicMachine3PUPS() noexcept {
  setMinimalActiveJointActuations({0.39, 0.39, 0.39});
  setMaximalActiveJointActuations({0.95, 0.95, 0.95});

  setEndEffectorJointPositions({
      -0.025561381023353, 0.086293776138137, 0.12,
      0.087513292835791, -0.021010082747031, 0.12,
      -0.061951911812438, -0.065283693391106, 0.12});

  setRedundantJointStartPositions({
      -0.463708870031622, 0.417029254828353, -0.346410161513775,
      0.593012363818459, 0.193069033993384, -0.346410161513775,
      -0.129303493786837, -0.610098288821738, -0.346410161513775});

  setRedundantJointEndPositions({
      -0.247202519085512, 0.292029254828353, 0.086602540378444,
      0.376506012872349, 0.068069033993384, 0.086602540378444,
      -0.129303493786837, -0.360098288821738, 0.086602540378444});

  redundantJointStartToEndPositions_ = redundantJointEndPositions_ - redundantJointStartPositions_;
  redundantJointIndicies_ = arma::find(arma::any(redundantJointStartToEndPositions_));

  redundantJointAngles_.set_size(3, redundantJointIndicies_.n_elem);
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; ++n) {
    const double& redundantJointXAngle = std::atan2(
        redundantJointStartToEndPositions_(1, n), redundantJointStartToEndPositions_(0, n));
    const double& redundantJointYAngle = std::atan2(
        redundantJointStartToEndPositions_(2, n), redundantJointStartToEndPositions_(1, n));
    redundantJointAngles_.col(n) = arma::Col<double>::fixed<3>({
        std::cos(redundantJointXAngle) * std::cos(redundantJointYAngle),
        std::sin(redundantJointXAngle) * std::cos(redundantJointYAngle),
        std::sin(redundantJointYAngle)});
  }
}
arma::Col<double> compute_column_means(const arma::Mat<double>& data)
{
  const long n_cols = data.n_cols;
  arma::Col<double> means(n_cols);
  for (long i = 0; i < n_cols; ++i)
    means(i) = arma::mean(data.col(i));
  return means;
}
void FeedBackward(const arma::Cube<eT>& inputActivation,
                  const arma::Mat<eT>& error,
                  arma::Cube<eT>& delta)
{
  // Generate a cube from the error matrix.
  arma::Cube<eT> mappedError = arma::zeros<arma::Cube<eT> >(inputActivation.n_rows,
      inputActivation.n_cols, inputActivation.n_slices);

  for (size_t s = 0, j = 0; s < mappedError.n_slices; s += error.n_cols, j++)
  {
    for (size_t i = 0; i < error.n_cols; i++)
    {
      arma::Col<eT> temp = error.col(i).subvec(
          j * inputActivation.n_rows * inputActivation.n_cols,
          (j + 1) * inputActivation.n_rows * inputActivation.n_cols - 1);

      mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
          inputActivation.n_rows, inputActivation.n_cols);
    }
  }

  // Apply the dropout mask and scaling to the remapped error.
  delta = mappedError % mask * scale;
}
arma::Row<T> column_apply(const arma::Mat<T>& matrix, Functor functor)
{
  // Apply the functor to every column and collect the results in a row vector.
  arma::Row<T> result(matrix.n_cols);
  std::size_t index = 0;
  std::generate(result.begin(), result.end(),
                [&matrix, &index, &functor]
                {
                  return functor(matrix.col(index++));
                });
  return result;
}
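// Hedged usage sketch for column_apply() above, assuming it is declared as a
// function template over T and Functor (the template header is not shown in
// the snippet); the function name column_apply_example and the data are made
// up. It computes the 2-norm of every column of a matrix in a single call.
void column_apply_example()
{
  arma::mat m(4, 10, arma::fill::randu);
  arma::Row<double> norms = column_apply(
      m, [](const arma::Col<double>& column) { return arma::norm(column, 2); });
  // norms(i) now holds the Euclidean norm of column i of m.
}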
void normalize_by_column(arma::Mat<double>& data, const arma::Col<double>& rms)
{
  if (data.n_cols != rms.n_elem)
    throw std::range_error("Number of elements of rms is not equal to the number of columns of data");

  for (long i = 0; i < long(data.n_cols); ++i)
  {
    if (rms(i) == 0)
      throw std::runtime_error("At least one of the entries of rms is equal to zero");
    data.col(i) *= 1. / rms(i);
  }
}
// Predict the rating for a group of user/item combinations.
void CF::Predict(const arma::Mat<size_t>& combinations,
                 arma::vec& predictions) const
{
  // First, for nearest neighbor search, stretch the H matrix.
  arma::mat l = arma::chol(w.t() * w);
  arma::mat stretchedH = l * h; // Due to the Armadillo API, l is L^T.

  // Now, we must determine those query indices we need to find the nearest
  // neighbors for.  This is easiest if we just sort the combinations matrix.
  arma::Mat<size_t> sortedCombinations(combinations.n_rows, combinations.n_cols);
  arma::uvec ordering = arma::sort_index(combinations.row(0).t());
  for (size_t i = 0; i < ordering.n_elem; ++i)
    sortedCombinations.col(i) = combinations.col(ordering[i]);

  // Now, we have to get the list of unique users we will be searching for.
  arma::Col<size_t> users = arma::unique(combinations.row(0).t());

  // Assemble our query matrix from the stretchedH matrix.
  arma::mat queries(stretchedH.n_rows, users.n_elem);
  for (size_t i = 0; i < queries.n_cols; ++i)
    queries.col(i) = stretchedH.col(users[i]);

  // Now calculate the neighborhood of these users.
  neighbor::KNN a(stretchedH);
  arma::mat distances;
  arma::Mat<size_t> neighborhood;

  a.Search(queries, numUsersForSimilarity, neighborhood, distances);

  // Now that we have the neighborhoods we need, calculate the predictions.
  predictions.set_size(combinations.n_cols);

  size_t user = 0; // Cumulative user count, because we are doing it in order.
  for (size_t i = 0; i < sortedCombinations.n_cols; ++i)
  {
    // Could this be made faster by calculating dot products for multiple items
    // at once?
    double rating = 0.0;

    // Map the combination's user to the user ID used for kNN.
    while (users[user] < sortedCombinations(0, i))
      ++user;

    for (size_t j = 0; j < neighborhood.n_rows; ++j)
      rating += arma::as_scalar(w.row(sortedCombinations(1, i)) *
          h.col(neighborhood(j, user)));
    rating /= neighborhood.n_rows;

    predictions(ordering[i]) = rating;
  }
}
int DynamicsTrappenberg::update_y(const arma::Mat<double> x,
                                  const arma::Mat<double> u,
                                  const mxArray *theta,
                                  const mxArray *ptheta,
                                  arma::Mat<double> &y)
{
  // The observation is simply the node states.
  y = x.col(INDEX_NODES);
  return 0;
}
/**
 * Given the sufficient statistics of a proposed split, calculate the
 * information gain if that split were to be used.  Each column of the 'counts'
 * matrix holds the number of points of each class in one proposed child node,
 * so 'counts' has one row per class and one column per child.
 *
 * @param counts Matrix of sufficient statistics.
 */
static double Evaluate(const arma::Mat<size_t>& counts)
{
  // Calculate the number of elements in the unsplit node and also in each
  // proposed child.
  size_t numElem = 0;
  arma::vec splitCounts(counts.n_cols);
  for (size_t i = 0; i < counts.n_cols; ++i)
  {
    splitCounts[i] = arma::accu(counts.col(i));
    numElem += splitCounts[i];
  }

  // Corner case: if there are no elements, the gain is zero.
  if (numElem == 0)
    return 0.0;

  arma::Col<size_t> classCounts = arma::sum(counts, 1);

  // Calculate the entropy of the unsplit node.
  double gain = 0.0;
  for (size_t i = 0; i < classCounts.n_elem; ++i)
  {
    const double f = ((double) classCounts[i] / (double) numElem);
    if (f > 0.0)
      gain -= f * std::log2(f);
  }

  // Now subtract the weighted entropy of each split node from the overall gain.
  for (size_t i = 0; i < counts.n_cols; ++i)
  {
    if (splitCounts[i] > 0)
    {
      double splitGain = 0.0;
      for (size_t j = 0; j < counts.n_rows; ++j)
      {
        const double f = ((double) counts(j, i) / (double) splitCounts[i]);
        if (f > 0.0)
          splitGain += f * std::log2(f);
      }

      gain += ((double) splitCounts[i] / (double) numElem) * splitGain;
    }
  }

  return gain;
}
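// Hypothetical sanity check for the information-gain Evaluate() above (its
// enclosing class is not shown, so the unqualified call below is purely
// illustrative): a perfect two-way split of a balanced two-class node. Each
// column holds the per-class counts of one proposed child, so the 2 x 2 matrix
// describes children {10, 0} and {0, 10}. The parent entropy is 1 bit and both
// children are pure, so the returned gain should be approximately 1.0.
//
//   arma::Mat<size_t> counts = { { 10, 0 },
//                                { 0, 10 } };
//   const double gain = Evaluate(counts);   // expected: ~1.0 bit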
static double Evaluate(const arma::Mat<size_t>& counts)
{
  // We need to sum over the difference between the un-split node and the
  // split nodes.  First we'll calculate the number of elements in each split
  // and in total.
  size_t numElem = 0;
  arma::vec splitCounts(counts.n_cols);
  for (size_t i = 0; i < counts.n_cols; ++i)
  {
    splitCounts[i] = arma::accu(counts.col(i));
    numElem += splitCounts[i];
  }

  // Corner case: if there are no elements, the impurity is zero.
  if (numElem == 0)
    return 0.0;

  arma::Col<size_t> classCounts = arma::sum(counts, 1);

  // Calculate the Gini impurity of the un-split node.
  double impurity = 0.0;
  for (size_t i = 0; i < classCounts.n_elem; ++i)
  {
    const double f = ((double) classCounts[i] / (double) numElem);
    impurity += f * (1.0 - f);
  }

  // Now calculate the impurity of the split nodes and subtract them from the
  // overall impurity.
  for (size_t i = 0; i < counts.n_cols; ++i)
  {
    if (splitCounts[i] > 0)
    {
      double splitImpurity = 0.0;
      for (size_t j = 0; j < counts.n_rows; ++j)
      {
        const double f = ((double) counts(j, i) / (double) splitCounts[i]);
        splitImpurity += f * (1.0 - f);
      }

      impurity -= ((double) splitCounts[i] / (double) numElem) * splitImpurity;
    }
  }

  return impurity;
}
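// The same hypothetical counts matrix run through the Gini Evaluate() above
// (again with an unqualified call purely for illustration) should yield a Gini
// gain of 0.5: the parent impurity is 2 * 0.5 * 0.5 = 0.5 and the pure children
// contribute nothing.
//
//   arma::Mat<size_t> counts = { { 10, 0 },
//                                { 0, 10 } };
//   const double giniGain = Evaluate(counts);   // expected: 0.5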
arma::Mat<double> DynamicsTrappenberg::sigma(const arma::Mat<double> x,
                                             const arma::Mat<double> u,
                                             const mxArray *theta,
                                             const mxArray *ptheta)
{
  // Read the MATLAB parameter fields into column vectors (one value per node).
  arma::Mat<double> beta(mxGetPr(mxGetField(theta, 0, "beta")), x.n_rows, 1, 1);
  arma::Mat<double> baseline(mxGetPr(mxGetField(theta, 0, "theta")), x.n_rows, 1, 1);

  // Element-wise sigmoid of the node states.
  arma::Mat<double> dx = 1 / (1 + exp(-x.col(INDEX_NODES) % beta + baseline));

  return dx;
}
/**
 * Convert the features of an image into a bag-of-visual-words histogram.
 * @param features features of the image
 * @param code_book code book of the data sets
 * @return bag-of-visual-words histogram
 */
Hist describe(arma::Mat<T> const &features, arma::Mat<T> const &code_book) const
{
  // Distance from every feature (row) to every code word (column).
  arma::Mat<T> dist(features.n_cols, code_book.n_cols);
  for (arma::uword i = 0; i != features.n_cols; ++i)
  {
    dist.row(i) = euclidean_dist(features.col(i), code_book);
  }
  //dist.print("dist");

  // Vote for the nearest code word of each feature.
  Hist hist = create_hist(dist.n_cols, armd::is_two_dim<Hist>::type());
  for (arma::uword i = 0; i != dist.n_rows; ++i)
  {
    arma::uword min_idx;
    dist.row(i).min(min_idx);
    ++hist(min_idx);
  }
  //hist.print("\nhist");

  return hist;
}
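// One plausible shape (not the original implementation) for the
// euclidean_dist() helper used by describe() above. It is sketched here as a
// member of the same class template, so T is already fixed and the column
// subview passed by describe() converts to arma::Col<T>. It returns the
// Euclidean distance from one feature vector to every column of the code book
// as a row of length code_book.n_cols, which is what describe() assigns into
// dist.row(i).
arma::Row<T> euclidean_dist(arma::Col<T> const &feature,
                            arma::Mat<T> const &code_book) const
{
  arma::Row<T> dist(code_book.n_cols);
  for (arma::uword i = 0; i != code_book.n_cols; ++i)
    dist(i) = arma::norm(feature - code_book.col(i), 2);

  return dist;
}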
void DefaultLocalAssemblerForOperatorsOnSurfacesUtilities<BasisFunctionType>::
    precalculateElementSizesAndCentersForSingleGrid(
        const RawGridGeometry<CoordinateType> &rawGeometry,
        std::vector<CoordinateType> &elementSizesSquared,
        arma::Mat<CoordinateType> &elementCenters,
        CoordinateType &averageElementSize)
{
  const size_t elementCount = rawGeometry.elementCount();
  const int worldDim = rawGeometry.worldDimension();

  // averageElementSize temporarily stores the sum of element sizes.
  averageElementSize = 0.;

  elementSizesSquared.resize(elementCount);
  for (size_t e = 0; e < elementCount; ++e)
  {
    elementSizesSquared[e] = elementSizeSquared(e, rawGeometry);
    averageElementSize += sqrt(elementSizesSquared[e]);
  }
  averageElementSize /= elementCount;

  elementCenters.set_size(worldDim, elementCount);
  for (size_t e = 0; e < elementCount; ++e)
    elementCenters.col(e) = elementCenter(e, rawGeometry);
}
void Backward(const arma::Cube<eT>& /* unused */,
              const arma::Mat<eT>& gy,
              arma::Cube<eT>& g)
{
  // Generate a cube from the error matrix.
  arma::Cube<eT> mappedError = arma::zeros<arma::Cube<eT> >(outputParameter.n_rows,
      outputParameter.n_cols, outputParameter.n_slices);

  for (size_t s = 0, j = 0; s < mappedError.n_slices; s += gy.n_cols, j++)
  {
    for (size_t i = 0; i < gy.n_cols; i++)
    {
      arma::Col<eT> temp = gy.col(i).subvec(
          j * outputParameter.n_rows * outputParameter.n_cols,
          (j + 1) * outputParameter.n_rows * outputParameter.n_cols - 1);

      mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
          outputParameter.n_rows, outputParameter.n_cols);
    }
  }

  Backward(inputParameter, mappedError, g);
}
void RAModel<SortPolicy>::Search(arma::mat&& querySet,
                                 const size_t k,
                                 arma::Mat<size_t>& neighbors,
                                 arma::mat& distances)
{
  // Apply the random basis if necessary.
  if (randomBasis)
    querySet = q * querySet;

  Log::Info << "Searching for " << k << " approximate nearest neighbors with ";
  if (!Naive() && !SingleMode())
    Log::Info << "dual-tree rank-approximate " << TreeName() << " search...";
  else if (!Naive())
    Log::Info << "single-tree rank-approximate " << TreeName() << " search...";
  else
    Log::Info << "brute-force (naive) rank-approximate search...";
  Log::Info << std::endl;

  switch (treeType)
  {
    case KD_TREE:
      if (!kdTreeRA->Naive() && !kdTreeRA->SingleMode())
      {
        // Build a second tree and search.
        Timer::Start("tree_building");
        Log::Info << "Building query tree..." << std::endl;
        std::vector<size_t> oldFromNewQueries;
        typename RAType<tree::KDTree>::Tree queryTree(std::move(querySet),
            oldFromNewQueries, leafSize);
        Log::Info << "Tree built." << std::endl;
        Timer::Stop("tree_building");

        arma::Mat<size_t> neighborsOut;
        arma::mat distancesOut;
        kdTreeRA->Search(&queryTree, k, neighborsOut, distancesOut);

        // Unmap the query points.
        distances.set_size(distancesOut.n_rows, distancesOut.n_cols);
        neighbors.set_size(neighborsOut.n_rows, neighborsOut.n_cols);
        for (size_t i = 0; i < neighborsOut.n_cols; ++i)
        {
          neighbors.col(oldFromNewQueries[i]) = neighborsOut.col(i);
          distances.col(oldFromNewQueries[i]) = distancesOut.col(i);
        }
      }
      else
      {
        // Search without building a second tree.
        kdTreeRA->Search(querySet, k, neighbors, distances);
      }
      break;

    case COVER_TREE:
      // No mapping necessary.
      coverTreeRA->Search(querySet, k, neighbors, distances);
      break;

    case R_TREE:
      // No mapping necessary.
      rTreeRA->Search(querySet, k, neighbors, distances);
      break;

    case R_STAR_TREE:
      // No mapping necessary.
      rStarTreeRA->Search(querySet, k, neighbors, distances);
      break;

    case X_TREE:
      // No mapping necessary.
      xTreeRA->Search(querySet, k, neighbors, distances);
      break;
  }
}
inline double ParallelKinematicMachine3PUPS::getEndEffectorPoseError(
    const arma::Col<double>::fixed<6>& endEffectorPose,
    const arma::Row<double>& redundantJointActuations) const {
  const arma::Cube<double>::fixed<3, 3, 2>& model = getModel(endEffectorPose, redundantJointActuations);

  const arma::Mat<double>::fixed<3, 3>& baseJoints = model.slice(0);

  const arma::Mat<double>::fixed<3, 3>& endEffectorJoints = model.slice(1);
  arma::Mat<double>::fixed<3, 3> endEffectorJointsRotated = endEffectorJoints;
  endEffectorJointsRotated.each_col() -= endEffectorPose.subvec(0, 2);

  const arma::Mat<double>::fixed<3, 3>& baseToEndEffectorJointPositions = endEffectorJoints - baseJoints;
  const arma::Row<double>::fixed<3>& baseToEndEffectorJointActuations =
      arma::sqrt(arma::sum(arma::square(baseToEndEffectorJointPositions)));

  if (arma::any(baseToEndEffectorJointActuations < minimalActiveJointActuations_) ||
      arma::any(baseToEndEffectorJointActuations > maximalActiveJointActuations_)) {
    return 0.0;
  }

  arma::Mat<double>::fixed<6, 3> forwardKinematic;
  forwardKinematic.head_rows(3) = baseToEndEffectorJointPositions;
  for (std::size_t n = 0; n < baseToEndEffectorJointPositions.n_cols; ++n) {
    forwardKinematic.submat(3, n, 5, n) =
        arma::cross(endEffectorJointsRotated.col(n), baseToEndEffectorJointPositions.col(n));
  }

  arma::Mat<double> inverseKinematic(6, 3 + redundantJointIndicies_.n_elem, arma::fill::zeros);
  inverseKinematic.diag() = -arma::sqrt(arma::sum(arma::square(baseToEndEffectorJointPositions)));
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; ++n) {
    const unsigned int& redundantJointIndex = redundantJointIndicies_(n);
    inverseKinematic(n, 3 + n) = arma::dot(baseToEndEffectorJointPositions.col(redundantJointIndex),
        redundantJointAngles_.col(redundantJointIndex));
  }

  return -1.0 / arma::cond(arma::solve(forwardKinematic.t(), inverseKinematic));
}
inline arma::Col<double>::fixed<6> ParallelKinematicMachine3PUPS::getEndEffectorPose(
    const arma::Row<double>::fixed<3>& actuations,
    const arma::Row<double>::fixed<3>& endEffectorRotation,
    const arma::Row<double>& redundantJointActuations) const {
  verify(arma::any(arma::vectorise(redundantJointActuations < 0)) ||
      arma::any(arma::vectorise(redundantJointActuations > 1)),
      "All values for the actuation of redundant joints must be within [0, 1].");
  verify(redundantJointActuations.n_elem == redundantJointIndicies_.n_elem,
      "The number of redundant actuations must be equal to the number of redundant joints.");

  const double& endEffectorRollAngle = endEffectorRotation(0);
  const double& endEffectorPitchAngle = endEffectorRotation(1);
  const double& endEffectorYawAngle = endEffectorRotation(2);

  arma::Mat<double> baseJointPositions = redundantJointStartPositions_;
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; n++) {
    const unsigned int& redundantJointIndex = redundantJointIndicies_(n);
    baseJointPositions.col(redundantJointIndex) += redundantJointActuations(redundantJointIndex) *
        redundantJointStartToEndPositions_.col(redundantJointIndex);
  }

  baseJointPositions -= get3DRotation(endEffectorRollAngle, endEffectorPitchAngle, endEffectorYawAngle) *
      endEffectorJointPositions_;

  return arma::join_cols(
      getTriangulation(baseJointPositions.col(0), actuations(0),
                       baseJointPositions.col(1), actuations(1),
                       baseJointPositions.col(2), actuations(2)),
      endEffectorRotation);
}
inline arma::Cube<double>::fixed<3, 3, 2> ParallelKinematicMachine3PUPS::getModel(
    const arma::Col<double>::fixed<6>& endEffectorPose,
    const arma::Row<double>& redundantJointActuations) const {
  verify(arma::any(arma::vectorise(redundantJointActuations < 0)) ||
      arma::any(arma::vectorise(redundantJointActuations > 1)),
      "All values for the actuation of redundant joints must be within [0, 1].");
  verify(redundantJointActuations.n_elem == redundantJointIndicies_.n_elem,
      "The number of redundant actuations must be equal to the number of redundant joints.");

  arma::Cube<double>::fixed<3, 3, 2> model;

  const arma::Col<double>::fixed<3>& endEffectorPosition = endEffectorPose.subvec(0, 2);
  const double& endEffectorRollAngle = endEffectorPose(3);
  const double& endEffectorPitchAngle = endEffectorPose(4);
  const double& endEffectorYawAngle = endEffectorPose(5);

  model.slice(0) = redundantJointStartPositions_;
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; n++) {
    const unsigned int& redundantJointIndex = redundantJointIndicies_(n);
    model.slice(0).col(redundantJointIndex) += redundantJointActuations(redundantJointIndex) *
        redundantJointStartToEndPositions_.col(redundantJointIndex);
  }

  model.slice(1) = get3DRotation(endEffectorRollAngle, endEffectorPitchAngle, endEffectorYawAngle) *
      endEffectorJointPositions_;
  model.slice(1).each_col() += endEffectorPosition;

  return model;
}
void NaiveBayes<T>::Train(const arma::Mat<T>& trainingData, const arma::Row<int>& classLabels)
{
  // Reset probability state.
  priorProbs.zeros();
  featureMeans.zeros();
  featureVariances.zeros();

  // Mean sums: sum the feature values for each instance of each class label.
  for (size_t j = 0; j < trainingData.n_cols; ++j)
  {
    const auto label = classLabels[j];
    // Increment the number of occurrences of the current class in priorProbs.
    ++priorProbs[label];
    featureMeans.col(label) += trainingData.col(j);
  }

  // Normalise the mean sums: divide each class's feature sums by the number of
  // occurrences of that class.
  for (size_t i = 0; i < priorProbs.n_elem; ++i)
  {
    // Skip classes with no instances in the training set, to avoid dividing by
    // zero.
    if (priorProbs[i] != 0.0)
      featureMeans.col(i) /= priorProbs[i];
  }

  // Variance sums.
  for (size_t j = 0; j < trainingData.n_cols; ++j)
  {
    const auto label = classLabels[j];
    /**
     * Should be real-time safe: arma::square does not malloc / dynamically
     * allocate, thanks to expression templates.
     */
    featureVariances.col(label) += arma::square(trainingData.col(j) - featureMeans.col(label));
  }

  // Normalise the variance sums.
  for (size_t i = 0; i < priorProbs.n_elem; ++i)
  {
    if (priorProbs[i] > 1)
      featureVariances.col(i) /= (priorProbs[i] - 1);
  }

  /**
   * Ensure that featureVariances can be inverted.  The distribution calculation
   * takes the standard deviation, which is sqrt(featureVariances), and then
   * inverts it, i.e. 1 / stdDev or equivalently arma::pow(stdDev, -1).  Any
   * attribute variance equal to 0.0 would therefore cause a divide-by-zero,
   * leaving -inf or NaN values in stdDev and breaking later calculations.
   * Replace 0.0 variances with the minimal positive floating point value.
   */
  for (size_t i = 0; i < featureVariances.n_elem; ++i)
  {
    if (featureVariances[i] == 0.0)
      featureVariances[i] = std::numeric_limits<T>::min();
  }

  // Normalise the prior probabilities.
  priorProbs /= static_cast<T>(trainingData.n_cols);
}
void NeighborSearch<SortPolicy, MetricType, TreeType>::Search(
    const size_t k,
    arma::Mat<size_t>& resultingNeighbors,
    arma::mat& distances)
{
  Timer::Start("computing_neighbors");

  // If we have built the trees ourselves, then we will have to map all the
  // indices back to their original indices when this computation is finished.
  // To avoid an extra copy, we will store the neighbors and distances in a
  // separate matrix.
  arma::Mat<size_t>* neighborPtr = &resultingNeighbors;
  arma::mat* distancePtr = &distances;

  if (ownQueryTree || (ownReferenceTree && !queryTree))
    distancePtr = new arma::mat; // Query indices need to be mapped.
  if (ownReferenceTree || ownQueryTree)
    neighborPtr = new arma::Mat<size_t>; // All indices need mapping.

  // Set the size of the neighbor and distance matrices.
  neighborPtr->set_size(k, querySet.n_cols);
  distancePtr->set_size(k, querySet.n_cols);
  distancePtr->fill(SortPolicy::WorstDistance());

  size_t numPrunes = 0;

  if (singleMode)
  {
    // Create the helper object for the tree traversal.
    typedef NeighborSearchRules<SortPolicy, MetricType, TreeType> RuleType;
    RuleType rules(referenceSet, querySet, *neighborPtr, *distancePtr, metric);

    // Create the traverser.
    typename TreeType::template SingleTreeTraverser<RuleType> traverser(rules);

    // Now have it traverse for each point.
    for (size_t i = 0; i < querySet.n_cols; ++i)
      traverser.Traverse(i, *referenceTree);

    numPrunes = traverser.NumPrunes();
  }
  else // Dual-tree recursion.
  {
    // Create the helper object for the tree traversal.
    typedef NeighborSearchRules<SortPolicy, MetricType, TreeType> RuleType;
    RuleType rules(referenceSet, querySet, *neighborPtr, *distancePtr, metric);

    typename TreeType::template DualTreeTraverser<RuleType> traverser(rules);

    if (queryTree)
      traverser.Traverse(*queryTree, *referenceTree);
    else
      traverser.Traverse(*referenceTree, *referenceTree);

    numPrunes = traverser.NumPrunes();
  }

  Log::Warn << "Pruned " << numPrunes << " nodes." << std::endl;

  Timer::Stop("computing_neighbors");

  // Now, do we need to do mapping of indices?
  if (!ownReferenceTree && !ownQueryTree)
  {
    // No mapping needed.  We are done.
    return;
  }
  else if (ownReferenceTree && ownQueryTree) // Map references and queries.
  {
    // Set size of output matrices correctly.
    resultingNeighbors.set_size(k, querySet.n_cols);
    distances.set_size(k, querySet.n_cols);

    for (size_t i = 0; i < distances.n_cols; i++)
    {
      // Map distances (copy a column).
      distances.col(oldFromNewQueries[i]) = distancePtr->col(i);

      // Map indices of neighbors.
      for (size_t j = 0; j < distances.n_rows; j++)
      {
        resultingNeighbors(j, oldFromNewQueries[i]) =
            oldFromNewReferences[(*neighborPtr)(j, i)];
      }
    }

    // Finished with temporary matrices.
    delete neighborPtr;
    delete distancePtr;
  }
  else if (ownReferenceTree)
  {
    if (!queryTree) // No query tree -- map both references and queries.
    {
      resultingNeighbors.set_size(k, querySet.n_cols);
      distances.set_size(k, querySet.n_cols);

      for (size_t i = 0; i < distances.n_cols; i++)
      {
        // Map distances (copy a column).
        distances.col(oldFromNewReferences[i]) = distancePtr->col(i);

        // Map indices of neighbors.
        for (size_t j = 0; j < distances.n_rows; j++)
        {
          resultingNeighbors(j, oldFromNewReferences[i]) =
              oldFromNewReferences[(*neighborPtr)(j, i)];
        }
      }
    }
    else // Map only references.
    {
      // Set size of neighbor indices matrix correctly.
      resultingNeighbors.set_size(k, querySet.n_cols);

      // Map indices of neighbors.
      for (size_t i = 0; i < resultingNeighbors.n_cols; i++)
      {
        for (size_t j = 0; j < resultingNeighbors.n_rows; j++)
        {
          resultingNeighbors(j, i) = oldFromNewReferences[(*neighborPtr)(j, i)];
        }
      }
    }

    // Finished with temporary matrix.
    delete neighborPtr;
  }
  else if (ownQueryTree)
  {
    // Set size of matrices correctly.
    resultingNeighbors.set_size(k, querySet.n_cols);
    distances.set_size(k, querySet.n_cols);

    for (size_t i = 0; i < distances.n_cols; i++)
    {
      // Map distances (copy a column).
      distances.col(oldFromNewQueries[i]) = distancePtr->col(i);

      // Map indices of neighbors.
      resultingNeighbors.col(oldFromNewQueries[i]) = neighborPtr->col(i);
    }

    // Finished with temporary matrices.
    delete neighborPtr;
    delete distancePtr;
  }
} // Search
void remove_column_means(arma::Mat<double>& data, const arma::Col<double>& means)
{
  if (data.n_cols != means.n_elem)
    throw std::range_error("Number of elements of means is not equal to the number of columns of data");

  for (long i = 0; i < long(data.n_cols); ++i)
    data.col(i) -= means(i);
}
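// Hedged usage sketch combining the column utilities above
// (compute_column_means, remove_column_means, compute_column_rms,
// normalize_by_column) into a simple column-wise standardisation pass. The
// data matrix, its dimensions, and the function name are made up for
// illustration.
void standardize_columns_example()
{
  arma::Mat<double> data(100, 5, arma::fill::randu);

  // Centre each column, then scale it by its root-mean-square value.
  const arma::Col<double> means = compute_column_means(data);
  remove_column_means(data, means);

  const arma::Col<double> rms = compute_column_rms(data);
  normalize_by_column(data, rms);
}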