// Split the data and labels into a training set and a test set, according to
// testRatio (the fraction of columns held out for testing).  The columns are
// shuffled before splitting.
template<typename T, typename U>
void Split(const arma::Mat<T>& input,
           const arma::Row<U>& inputLabel,
           arma::Mat<T>& trainData,
           arma::Mat<T>& testData,
           arma::Row<U>& trainLabel,
           arma::Row<U>& testLabel,
           const double testRatio)
{
  const size_t testSize = static_cast<size_t>(input.n_cols * testRatio);
  const size_t trainSize = input.n_cols - testSize;

  trainData.set_size(input.n_rows, trainSize);
  testData.set_size(input.n_rows, testSize);
  trainLabel.set_size(trainSize);
  testLabel.set_size(testSize);

  // Generate a random permutation of the column indices.
  const arma::Col<size_t> order =
      arma::shuffle(arma::linspace<arma::Col<size_t>>(0, input.n_cols - 1,
                                                      input.n_cols));

  // The first trainSize shuffled columns form the training set...
  for (size_t i = 0; i != trainSize; ++i)
  {
    trainData.col(i) = input.col(order[i]);
    trainLabel(i) = inputLabel(order[i]);
  }

  // ...and the remaining testSize columns form the test set.
  for (size_t i = 0; i != testSize; ++i)
  {
    testData.col(i) = input.col(order[i + trainSize]);
    testLabel(i) = inputLabel(order[i + trainSize]);
  }
}
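// A minimal usage sketch for Split() above (not part of the original source):
// the dataset, labels, and the 0.2 hold-out ratio are made up for illustration.
#include <armadillo>

void splitUsageExample()
{
  arma::mat dataset(5, 100, arma::fill::randu);  // 100 points, 5 features each.
  arma::Row<size_t> labels(100);
  for (size_t i = 0; i < labels.n_elem; ++i)
    labels(i) = i % 2;  // Two alternating class labels.

  arma::mat trainData, testData;
  arma::Row<size_t> trainLabels, testLabels;

  // Hold out 20% of the (shuffled) columns as the test set.
  Split(dataset, labels, trainData, testData, trainLabels, testLabels, 0.2);

  // trainData now has 80 columns and testData has 20.
}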
inline ParallelKinematicMachine3PRPR::ParallelKinematicMachine3PRPR() noexcept
{
  setMinimalActiveJointActuations({0.1, 0.1, 0.1});
  setMaximalActiveJointActuations({1.2, 1.2, 1.2});

  setEndEffectorJointPositions({
    -0.000066580445834, 0.106954081945581,
    -0.092751709777083, -0.053477040972790,
    0.092818290222917, -0.053477040972790});

  setRedundantJointStartPositions({
    0.1, 1.0392,
    0.0, 0.8,
    1.2, 0.8});

  setRedundantJointEndPositions({
    1.1, 1.0392,
    0.0, -0.2,
    1.2, -0.2});

  redundantJointStartToEndPositions_ =
      redundantJointEndPositions_ - redundantJointStartPositions_;
  redundantJointIndicies_ =
      arma::find(arma::any(redundantJointStartToEndPositions_));

  // Precompute the sine and cosine of each redundant joint's direction so they
  // do not have to be recomputed later.
  redundantJointAngleSines_.set_size(redundantJointIndicies_.n_elem);
  redundantJointAngleCosines_.set_size(redundantJointIndicies_.n_elem);
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; ++n)
  {
    const double redundantJointAngle = std::atan2(
        redundantJointStartToEndPositions_(1, n),
        redundantJointStartToEndPositions_(0, n));
    redundantJointAngleSines_(n) = std::sin(redundantJointAngle);
    redundantJointAngleCosines_(n) = std::cos(redundantJointAngle);
  }
}
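// A standalone sketch of the redundant-joint angle precomputation used in the
// constructor above (Armadillo only; the 2-D start/end joint positions below
// are made-up values, not the machine's real geometry).
#include <cmath>
#include <armadillo>

void redundantJointAngleExample()
{
  // One column per joint; row 0 holds x coordinates, row 1 holds y coordinates.
  const arma::mat startPositions = {{0.1, 0.0, 1.2},
                                    {1.0, 0.8, 0.8}};
  const arma::mat endPositions = {{1.1, 0.0, 1.2},
                                  {1.0, -0.2, -0.2}};

  const arma::mat startToEnd = endPositions - startPositions;

  // Joints whose start and end positions differ are the adjustable ones.
  const arma::uvec redundantJoints = arma::find(arma::any(startToEnd));

  arma::vec sines(redundantJoints.n_elem);
  arma::vec cosines(redundantJoints.n_elem);
  for (arma::uword n = 0; n < redundantJoints.n_elem; ++n)
  {
    const arma::uword joint = redundantJoints(n);
    const double angle = std::atan2(startToEnd(1, joint), startToEnd(0, joint));
    sines(n) = std::sin(angle);
    cosines(n) = std::cos(angle);
  }
}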
template<typename MatType, typename WeakLearner>
void AdaBoost<MatType, WeakLearner>::Classify(
    const MatType& test,
    arma::Row<size_t>& predictedLabels)
{
  arma::Row<size_t> tempPredictedLabels(test.n_cols);
  arma::mat cMatrix(classes, test.n_cols);
  cMatrix.zeros();
  predictedLabels.set_size(test.n_cols);

  // Accumulate each weak learner's weighted vote for its predicted class.
  for (size_t i = 0; i < wl.size(); i++)
  {
    wl[i].Classify(test, tempPredictedLabels);

    for (size_t j = 0; j < tempPredictedLabels.n_cols; j++)
      cMatrix(tempPredictedLabels(j), j) += alpha[i];
  }

  // Each point gets the label of the class with the largest accumulated weight.
  arma::colvec cMRow;
  arma::uword maxIndex;

  for (size_t i = 0; i < predictedLabels.n_cols; i++)
  {
    cMRow = cMatrix.unsafe_col(i);
    cMRow.max(maxIndex);
    predictedLabels(i) = maxIndex;
  }
}
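// A standalone sketch of the weighted-vote aggregation performed by
// AdaBoost::Classify() above (Armadillo only; the two weak learners'
// predictions and their alpha weights are made up for illustration).
#include <vector>
#include <armadillo>

void adaBoostVotingExample()
{
  const size_t classes = 3;
  const size_t points = 4;

  // Hypothetical predictions of two weak learners on four points.
  const std::vector<arma::Row<size_t>> predictions = {{0, 1, 2, 1},
                                                      {0, 2, 2, 1}};
  const std::vector<double> alpha = {0.7, 0.4};

  // Accumulate each learner's weighted vote for its predicted class.
  arma::mat cMatrix(classes, points, arma::fill::zeros);
  for (size_t i = 0; i < predictions.size(); ++i)
    for (size_t j = 0; j < points; ++j)
      cMatrix(predictions[i](j), j) += alpha[i];

  // The final label of each point is the class with the largest total weight.
  arma::Row<size_t> labels(points);
  for (size_t j = 0; j < points; ++j)
    labels(j) = arma::index_max(cMatrix.col(j));
}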
template<typename MatType>
void NaiveBayesClassifier<MatType>::Classify(const MatType& data,
                                             arma::Row<size_t>& results)
{
  // Check that the number of features in the test data is same as in the
  // training data.
  Log::Assert(data.n_rows == means.n_rows);

  arma::vec probs = arma::log(probabilities);
  arma::mat invVar = 1.0 / variances;

  arma::mat testProbs = arma::repmat(probs.t(), data.n_cols, 1);

  results.set_size(data.n_cols); // No need to fill with anything yet.

  Log::Info << "Running Naive Bayes classifier on " << data.n_cols
      << " data points with " << data.n_rows << " features each." << std::endl;

  // Calculate the joint log-probability of each data point under each class
  // (each column of means).
  // Loop over every class.
  for (size_t i = 0; i < means.n_cols; i++)
  {
    // This is an adaptation of gmm::phi() for the case where the covariance is
    // a diagonal matrix.
    arma::mat diffs = data - arma::repmat(means.col(i), 1, data.n_cols);
    arma::mat rhs = -0.5 * arma::diagmat(invVar.col(i)) * diffs;
    arma::vec exponents(diffs.n_cols);
    for (size_t j = 0; j < diffs.n_cols; ++j)
      exponents(j) = arma::accu(diffs.col(j) % rhs.unsafe_col(j)); // log(exp(value)) == value

    // Calculate probability as sum of logarithms to decrease floating point
    // errors.
    testProbs.col(i) += (data.n_rows / -2.0 * log(2 * M_PI) - 0.5 *
        log(arma::det(arma::diagmat(variances.col(i)))) + exponents);
  }

  // Now calculate the label for each point.
  for (size_t i = 0; i < data.n_cols; ++i)
  {
    // Find the index of the class with maximum probability for this point.
    arma::uword maxIndex = 0;
    arma::vec pointProbs = testProbs.row(i).t();
    pointProbs.max(maxIndex);

    results[i] = maxIndex;
  }
}
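// A standalone check of the per-class log-density formula used above: for a
// diagonal covariance, log N(x | mu, diag(var)) = -d/2 * log(2*pi)
// - 1/2 * log|diag(var)| - 1/2 * (x - mu)' * diag(1/var) * (x - mu).
// (Armadillo only; the point, mean, and variances are made-up values.)
#include <cmath>
#include <armadillo>

void diagonalGaussianLogDensityExample()
{
  const arma::vec x = {1.0, 2.0, 0.5};
  const arma::vec mean = {0.8, 1.5, 0.0};
  const arma::vec var = {0.5, 1.0, 2.0};

  const arma::vec invVar = 1.0 / var;
  const arma::vec diff = x - mean;

  // Same structure as the exponents computed per point in Classify().
  const double exponent = arma::accu(diff % (-0.5 * invVar % diff));

  const double logDensity = (x.n_elem / -2.0) * std::log(2.0 * M_PI)
      - 0.5 * std::log(arma::det(arma::diagmat(var)))
      + exponent;

  (void) logDensity;  // Approximately -2.98 for these made-up values.
}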
template<typename MatType>
void DecisionStump<MatType>::Classify(const MatType& test,
                                      arma::Row<size_t>& predictedLabels)
{
  predictedLabels.set_size(test.n_cols);

  for (size_t i = 0; i < test.n_cols; i++)
  {
    // Determine which bin the test point falls into.
    // Assume first that it falls into the first bin, then proceed through the
    // bins until it is known which bin it falls into.
    size_t bin = 0;
    const double val = test(splitDimension, i);

    while (bin < split.n_elem - 1)
    {
      if (val < split(bin + 1))
        break;

      ++bin;
    }

    predictedLabels(i) = binLabels(bin);
  }
}
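// A standalone sketch of the bin lookup inside DecisionStump::Classify() above
// (the split points and bin labels are made up; in the real stump they come
// from training).
#include <armadillo>

void stumpBinLookupExample()
{
  // Bins: (-inf, 1.0), [1.0, 2.5), [2.5, +inf), labelled 0, 1, 0.
  const arma::vec split = {-arma::datum::inf, 1.0, 2.5};
  const arma::Col<size_t> binLabels = {0, 1, 0};

  const double val = 1.7;

  // Walk forward through the split points until the next one exceeds val.
  size_t bin = 0;
  while (bin < split.n_elem - 1)
  {
    if (val < split(bin + 1))
      break;
    ++bin;
  }

  const size_t label = binLabels(bin);  // bin == 1, label == 1 here.
  (void) label;
}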
template<typename MatType>
void RefinedStart::Cluster(const MatType& data,
                           const size_t clusters,
                           arma::Row<size_t>& assignments) const
{
  // Perform the Bradley-Fayyad refined start algorithm, and get initial
  // centroids back.
  arma::mat centroids;
  Cluster(data, clusters, centroids);

  // Turn the final centroids into assignments.
  assignments.set_size(data.n_cols);
  for (size_t i = 0; i < data.n_cols; ++i)
  {
    // Find the closest centroid to this point.
    double minDistance = std::numeric_limits<double>::infinity();
    size_t closestCluster = clusters;

    for (size_t j = 0; j < clusters; ++j)
    {
      // This is restricted to the L2 distance, and unfortunately it would take
      // a lot of refactoring and redesign to make this more general... we would
      // probably need to have KMeans take a template template parameter for the
      // initial partition policy.  It's not clear how to best do this.
      const double distance = metric::EuclideanDistance::Evaluate(data.col(i),
          centroids.col(j));

      if (distance < minDistance)
      {
        minDistance = distance;
        closestCluster = j;
      }
    }

    // Assign the point to its closest cluster.
    assignments[i] = closestCluster;
  }
}
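// A standalone sketch of the nearest-centroid assignment step used above, with
// arma::norm providing the L2 distance (the data and centroid values are made
// up for illustration).
#include <limits>
#include <armadillo>

void nearestCentroidExample()
{
  const arma::mat data(2, 6, arma::fill::randu);  // Six 2-D points.
  const arma::mat centroids = {{0.2, 0.8},        // Two centroids, one per
                               {0.2, 0.8}};       // column.

  arma::Row<size_t> assignments(data.n_cols);
  for (size_t i = 0; i < data.n_cols; ++i)
  {
    double minDistance = std::numeric_limits<double>::infinity();
    size_t closestCluster = centroids.n_cols;

    for (size_t j = 0; j < centroids.n_cols; ++j)
    {
      const double distance = arma::norm(data.col(i) - centroids.col(j), 2);
      if (distance < minDistance)
      {
        minDistance = distance;
        closestCluster = j;
      }
    }

    assignments(i) = closestCluster;
  }
}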