// Randomly partition a column-major dataset and its labels into a training
// and a test set.
//
// @param input       Dataset; one point per column.
// @param inputLabel  Label for each column of `input`.
// @param trainData   (out) Training points (resized here).
// @param testData    (out) Test points (resized here).
// @param trainLabel  (out) Labels matching trainData's columns.
// @param testLabel   (out) Labels matching testData's columns.
// @param testRatio   Fraction of columns assigned to the test set.
void Split(const arma::Mat<T>& input,
           const arma::Row<U>& inputLabel,
           arma::Mat<T>& trainData,
           arma::Mat<T>& testData,
           arma::Row<U>& trainLabel,
           arma::Row<U>& testLabel,
           const double testRatio)
{
  // Number of columns reserved for testing; everything else trains.
  const size_t testSize = static_cast<size_t>(input.n_cols * testRatio);
  const size_t trainSize = input.n_cols - testSize;

  trainData.set_size(input.n_rows, trainSize);
  testData.set_size(input.n_rows, testSize);
  trainLabel.set_size(trainSize);
  testLabel.set_size(testSize);

  // Build a random permutation of all column indices.
  const arma::Col<size_t> order = arma::shuffle(
      arma::linspace<arma::Col<size_t>>(0, input.n_cols - 1, input.n_cols));

  // The first trainSize shuffled indices form the training set...
  for (size_t i = 0; i < trainSize; ++i)
  {
    trainData.col(i) = input.col(order[i]);
    trainLabel(i) = inputLabel(order[i]);
  }

  // ...and the remaining indices form the test set.
  for (size_t i = 0; i < testSize; ++i)
  {
    testData.col(i) = input.col(order[trainSize + i]);
    testLabel(i) = inputLabel(order[trainSize + i]);
  }
}
// Default-construct a 3-PRPR parallel kinematic machine with its standard
// geometry: active joint actuation limits, end-effector joint positions and
// the start/end positions of the redundant joints, then precompute the
// sine/cosine of each redundant joint's orientation angle.
inline ParallelKinematicMachine3PRPR::ParallelKinematicMachine3PRPR() noexcept {
  setMinimalActiveJointActuations({0.1, 0.1, 0.1});
  // BUG FIX: this second call previously re-invoked
  // setMinimalActiveJointActuations, overwriting the minima with {1.2, ...}
  // and never setting the maxima at all. {1.2, 1.2, 1.2} are the maximal
  // actuations.
  setMaximalActiveJointActuations({1.2, 1.2, 1.2});

  setEndEffectorJointPositions({
    -0.000066580445834, 0.106954081945581,
    -0.092751709777083, -0.053477040972790,
    0.092818290222917, -0.053477040972790});

  setRedundantJointStartPositions({
    0.1, 1.0392,
    0.0, 0.8,
    1.2, 0.8});
  setRedundantJointEndPositions({
    1.1, 1.0392,
    0.0, -0.2,
    1.2, -0.2});

  redundantJointStartToEndPositions_ =
      redundantJointEndPositions_ - redundantJointStartPositions_;
  // Columns with any non-zero start-to-end displacement are the redundant
  // joints.
  redundantJointIndicies_ = arma::find(arma::any(redundantJointStartToEndPositions_));

  redundantJointAngleSines_.set_size(redundantJointIndicies_.n_elem);
  redundantJointAngleCosines_.set_size(redundantJointIndicies_.n_elem);
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; ++n) {
    // BUG FIX: index the displacement matrix via redundantJointIndicies_(n).
    // `n` enumerates the *redundant* joints, not the columns of
    // redundantJointStartToEndPositions_, so using `n` directly reads the
    // wrong column whenever the redundant joints are not the leading ones.
    const arma::uword redundantJointIndex = redundantJointIndicies_(n);
    const double redundantJointAngle = std::atan2(
        redundantJointStartToEndPositions_(1, redundantJointIndex),
        redundantJointStartToEndPositions_(0, redundantJointIndex));
    redundantJointAngleSines_(n) = std::sin(redundantJointAngle);
    redundantJointAngleCosines_(n) = std::cos(redundantJointAngle);
  }
}
// Copy row `index` of `data` into a std::vector<double>.
//
// @param data   Source matrix.
// @param index  Zero-based row index.
// @return       The row's elements, in column order.
// @throws std::range_error if index is negative or >= data.n_rows.
std::vector<double> extract_row_vector(const arma::Mat<double>& data, long index) {
  if (index < 0 || index >= long(data.n_rows))
    throw std::range_error(join("Index out of range: ", index));

  // Materialize the row so its elements are contiguous in memory.
  const arma::Row<double> row(data.row(index));
  const double* memptr = row.memptr();
  std::vector<double> result(memptr, memptr + row.n_elem);

  // FIX: return by value. `return std::move(result)` is a pessimizing move
  // that inhibits NRVO (flagged by -Wpessimizing-move); a plain return lets
  // the compiler elide the copy entirely.
  return result;
}
// Classify each column of `test` by weighted majority vote over the trained
// weak learners: weak learner i contributes weight alpha[i] to the class it
// predicts for each point, and the class with the largest accumulated weight
// wins.
//
// @param test             Test points, one per column.
// @param predictedLabels  (out) Predicted class index per column (resized).
void AdaBoost<MatType, WeakLearner>::Classify(
    const MatType& test,
    arma::Row<size_t>& predictedLabels)
{
  arma::Row<size_t> tempPredictedLabels(test.n_cols);
  // cMatrix(c, j) accumulates the total vote weight for class c on point j.
  arma::mat cMatrix(classes, test.n_cols);
  cMatrix.zeros();
  predictedLabels.set_size(test.n_cols);

  for (size_t i = 0; i < wl.size(); i++)
  {
    wl[i].Classify(test, tempPredictedLabels);
    for (size_t j = 0; j < tempPredictedLabels.n_cols; j++)
    {
      // BUG FIX: accumulate the weak learner's weight alpha[i] only. The old
      // code multiplied by the predicted label *value*, so class 0 could
      // never receive any vote weight and other classes were scaled by their
      // arbitrary numeric label index.
      cMatrix(tempPredictedLabels(j), j) += alpha[i];
    }
  }

  // For each point, pick the class with the maximum accumulated vote.
  arma::colvec cMRow;
  arma::uword maxIndex;
  for (size_t i = 0; i < predictedLabels.n_cols; i++)
  {
    cMRow = cMatrix.unsafe_col(i);
    cMRow.max(maxIndex);
    predictedLabels(i) = maxIndex;
  }
}
// Predict the class of each column of `testData` with the trained softmax
// model: compute per-class probabilities and take the argmax per point.
//
// @param testData     Test points, one per column; must have FeatureSize()
//                     rows.
// @param predictions  (out) Predicted class index per column (resized).
// @throws std::invalid_argument if the dimensionality does not match.
void SoftmaxRegression<OptimizerType>::Predict(const arma::mat& testData,
                                               arma::Row<size_t>& predictions) const
{
  if (testData.n_rows != FeatureSize())
  {
    std::ostringstream oss;
    // BUG FIX: the message previously lacked a space before the final
    // "dimensions", producing e.g. "...but model has 5dimensions".
    oss << "SoftmaxRegression::Predict(): test data has " << testData.n_rows
        << " dimensions, but model has " << FeatureSize() << " dimensions";
    throw std::invalid_argument(oss.str());
  }

  // Calculate the probabilities for each test input.
  arma::mat hypothesis, probabilities;
  if (fitIntercept)
  {
    // In order to add the intercept term, we should compute following matrix:
    //     [1; data] = arma::join_cols(ones(1, data.n_cols), data)
    //     hypothesis = arma::exp(parameters * [1; data]).
    //
    // Since the cost of join maybe high due to the copy of original data,
    // split the hypothesis computation to two components.
    hypothesis = arma::exp(
        arma::repmat(parameters.col(0), 1, testData.n_cols) +
        parameters.cols(1, parameters.n_cols - 1) * testData);
  }
  else
  {
    hypothesis = arma::exp(parameters * testData);
  }

  // Normalize each column so the class scores sum to one.
  probabilities = hypothesis /
      arma::repmat(arma::sum(hypothesis, 0), numClasses, 1);

  // Prepare necessary data.
  predictions.zeros(testData.n_cols);
  double maxProbability = 0;

  // For each test input, pick the most probable class.
  for (size_t i = 0; i < testData.n_cols; i++)
  {
    for (size_t j = 0; j < numClasses; j++)
    {
      // If a higher class probability is encountered, change prediction.
      if (probabilities(j, i) > maxProbability)
      {
        maxProbability = probabilities(j, i);
        predictions(i) = j;
      }
    }

    // Set maximum probability to zero for the next input.
    maxProbability = 0;
  }
}
// Assemble the full kinematic model of the multi-level Stewart platform.
// Level 0 is driven by the end-effector pose; each further level consumes its
// own 6-element slice of the redundant joints actuation, and the per-level
// models are concatenated along the cube's slice dimension.
//
// @param endEffectorPose           Pose driving the first platform level.
// @param redundantJointsActuation  Normalized actuations in [0, 1], 6 per
//                                  additional level.
// @return  All platform-level models joined into one cube.
arma::Cube<double> MultiLevelStewartPlatform::getModelImplementation(
    const arma::Col<double>& endEffectorPose,
    const arma::Row<double>& redundantJointsActuation) const {
  assert(redundantJointsActuation.n_elem == numberOfRedundantJoints_);
  assert(arma::all(redundantJointsActuation >= 0) && arma::all(redundantJointsActuation <= 1));

  arma::Cube<double> model = platformLevels_.at(0).getModel(endEffectorPose, {});
  for (arma::uword n = 1; n < platformLevels_.size(); ++n) {
    // BUG FIX: arma::join_slices RETURNS the joined cube; the previous code
    // discarded the result, so `model` never grew beyond the first level's
    // model. The result must be assigned back.
    model = arma::join_slices(model, platformLevels_.at(n).getModel(
        redundantJointsActuation.subvec(6 * n - 6, 6 * n - 1), {}));
  }

  return model;
}
// Classify every column of `data` with the trained Gaussian naive Bayes
// model, writing the predicted class index of point i into results[i].
// All probabilities are accumulated in log space to reduce floating point
// error; the per-class Gaussian density uses the diagonal covariance stored
// in `variances`.
void NaiveBayesClassifier<MatType>::Classify(const MatType& data,
                                             arma::Row<size_t>& results)
{
  // Check that the number of features in the test data is same as in the
  // training data.
  Log::Assert(data.n_rows == means.n_rows);

  // Log class priors and elementwise reciprocal of the per-class variances
  // (the inverse of a diagonal covariance).
  arma::vec probs = arma::log(probabilities);
  arma::mat invVar = 1.0 / variances;

  // testProbs is (points x classes): each column starts at that class's log
  // prior and will accumulate every point's log-likelihood under the class.
  arma::mat testProbs = arma::repmat(probs.t(), data.n_cols, 1);

  results.set_size(data.n_cols); // No need to fill with anything yet.

  Log::Info << "Running Naive Bayes classifier on " << data.n_cols
      << " data points with " << data.n_rows << " features each." << std::endl;

  // Calculate the joint probability for each of the data points for each of the
  // means.n_cols.

  // Loop over every class.
  for (size_t i = 0; i < means.n_cols; i++)
  {
    // This is an adaptation of gmm::phi() for the case where the covariance is
    // a diagonal matrix.
    arma::mat diffs = data - arma::repmat(means.col(i), 1, data.n_cols);
    arma::mat rhs = -0.5 * arma::diagmat(invVar.col(i)) * diffs;
    // Quadratic form (x - mu)' * Sigma^-1 * (x - mu) * -0.5 for each point,
    // computed columnwise.
    arma::vec exponents(diffs.n_cols);
    for (size_t j = 0; j < diffs.n_cols; ++j)
      exponents(j) = arma::accu(diffs.col(j) % rhs.unsafe_col(j)); // log(exp(value)) == value

    // Calculate probability as sum of logarithm to decrease floating point
    // errors.  The two leading terms are the Gaussian log-normalization
    // constant: -d/2 * log(2*pi) - 1/2 * log(det(Sigma)).
    testProbs.col(i) += (data.n_rows / -2.0 * log(2 * M_PI) -
        0.5 * log(arma::det(arma::diagmat(variances.col(i)))) + exponents);
  }

  // Now calculate the label.
  for (size_t i = 0; i < data.n_cols; ++i)
  {
    // Find the index of the class with maximum probability for this point.
    arma::uword maxIndex = 0;
    arma::vec pointProbs = testProbs.row(i).t();
    pointProbs.max(maxIndex);
    results[i] = maxIndex;
  }

  return;
}
// Train a decision stump: pick the attribute (row of `data`) whose split
// yields the highest information gain relative to the entropy of the full
// label set, then bucket that attribute's values for classification.
//
// @param data           Training points, one per column.
// @param labels         Class label per column of `data`.
// @param classes        Number of distinct classes.
// @param inpBucketSize  Minimum bucket size used when splitting.
DecisionStump<MatType>::DecisionStump(const MatType& data,
                                      const arma::Row<size_t>& labels,
                                      const size_t classes,
                                      size_t inpBucketSize)
{
  numClass = classes;
  bucketSize = inpBucketSize;

  // Entropy of the whole label set; every candidate split's gain is measured
  // against this baseline.
  const double rootEntropy = CalculateEntropy<size_t>(
      labels.subvec(0, labels.n_elem - 1));

  size_t bestAtt = 0;
  double entropy;
  double gain, bestGain = 0.0;
  for (size_t i = 0; i < data.n_rows; i++)
  {
    // Go through each attribute of the data; attributes whose values are all
    // identical cannot split anything and are skipped.
    if (IsDistinct<double>(data.row(i)))
    {
      // Treat this attribute as a potential splitting attribute and compute
      // the entropy of the labels after splitting on it.
      entropy = SetupSplitAttribute(data.row(i), labels);
      gain = rootEntropy - entropy;

      // BUG FIX: we maximize information gain, so a candidate replaces the
      // incumbent only when its gain is GREATER. The previous `gain <
      // bestGain` test could never succeed with bestGain starting at 0 for
      // any non-negative gain, so the first attribute was always chosen.
      if (gain > bestGain)
      {
        bestAtt = i;
        bestGain = gain;
      }
    }
  }
  splitAttribute = bestAtt;

  // Once the splitting column/attribute has been decided, train on it.
  TrainOnAtt<double>(data.row(splitAttribute), labels);
}
// Classify each column of `test` with the trained stump: find which bin of
// the split dimension the point's value falls into and emit that bin's label.
//
// @param test             Test points, one per column.
// @param predictedLabels  (out) Predicted label per column (resized here).
void DecisionStump<MatType>::Classify(const MatType& test,
                                      arma::Row<size_t>& predictedLabels)
{
  predictedLabels.set_size(test.n_cols);

  for (size_t i = 0; i < test.n_cols; i++)
  {
    // Walk the bin boundaries: stay in bin `binIndex` until the value is
    // below the next boundary (or we reach the last bin).
    const double value = test(splitDimension, i);
    size_t binIndex = 0;
    while ((binIndex < split.n_elem - 1) && (value >= split(binIndex + 1)))
      ++binIndex;

    predictedLabels(i) = binLabels(binIndex);
  }
}
void RefinedStart::Cluster(const MatType& data, const size_t clusters, arma::Row<size_t>& assignments) const { // Perform the Bradley-Fayyad refined start algorithm, and get initial // centroids back. arma::mat centroids; Cluster(data, clusters, centroids); // Turn the final centroids into assignments. assignments.set_size(data.n_cols); for (size_t i = 0; i < data.n_cols; ++i) { // Find the closest centroid to this point. double minDistance = std::numeric_limits<double>::infinity(); size_t closestCluster = clusters; for (size_t j = 0; j < clusters; ++j) { // This is restricted to the L2 distance, and unfortunately it would take // a lot of refactoring and redesign to make this more general... we would // probably need to have KMeans take a template template parameter for the // initial partition policy. It's not clear how to best do this. const double distance = metric::EuclideanDistance::Evaluate(data.col(i), centroids.col(j)); if (distance < minDistance) { minDistance = distance; closestCluster = j; } } // Assign the point to its closest cluster. assignments[i] = closestCluster; } }
// Accumulate the end-effector pose error over all platform levels.  Level 0
// is evaluated at the given end-effector pose; each further level is
// evaluated at its own 6-element slice of the redundant joints actuation.
//
// @param endEffectorPose           Pose for the first platform level.
// @param redundantJointsActuation  Normalized actuations in [0, 1].
// @return  Sum of the (non-negative) per-level pose errors.
double MultiLevelStewartPlatform::getEndEffectorPoseErrorImplementation(
    const arma::Col<double>& endEffectorPose,
    const arma::Row<double>& redundantJointsActuation) const {
  assert(redundantJointsActuation.n_elem == numberOfRedundantJoints_);
  assert(arma::all(redundantJointsActuation >= 0) && arma::all(redundantJointsActuation <= 1));

  double poseError = platformLevels_.at(0).getEndEffectorPoseError(endEffectorPose, {});

  for (arma::uword level = 1; level < platformLevels_.size(); ++level) {
    // Slice out this level's 6 actuation values and evaluate its error.
    const double levelPoseError = platformLevels_.at(level).getEndEffectorPoseError(
        redundantJointsActuation.subvec(6 * level - 6, 6 * level - 1), {});
    assert(levelPoseError >= 0);
    poseError += levelPoseError;
  }

  return poseError;
}
// Concatenate the actuation of every platform level into one row vector.
// Level 0 is driven by the end-effector pose; each further level is driven by
// its own 6-element slice of the redundant joints actuation.
//
// @param endEffectorPose           Pose for the first platform level.
// @param redundantJointsActuation  Normalized actuations in [0, 1].
// @return  All per-level actuations joined row-wise, level 0 first.
arma::Row<double> MultiLevelStewartPlatform::getActuationImplementation(
    const arma::Col<double>& endEffectorPose,
    const arma::Row<double>& redundantJointsActuation) const {
  assert(redundantJointsActuation.n_elem == numberOfRedundantJoints_);
  assert(arma::all(redundantJointsActuation >= 0) && arma::all(redundantJointsActuation <= 1));

  arma::Row<double> actuation = platformLevels_.at(0).getActuation(endEffectorPose, {});

  for (arma::uword level = 1; level < platformLevels_.size(); ++level) {
    // Evaluate this level on its 6-element actuation slice and sanity-check
    // the result against the level's joint limits before appending it.
    const arma::Row<double> levelActuation = platformLevels_.at(level).getActuation(
        redundantJointsActuation.subvec(6 * level - 6, 6 * level - 1), {});
    assert(levelActuation.n_elem == numberOfActiveJoints_);
    assert(arma::all(levelActuation >= platformLevels_.at(level).minimalActiveJointsActuation_) &&
        arma::all(levelActuation <= platformLevels_.at(level).maximalActiveJointsActuation_));

    actuation = arma::join_rows(actuation, levelActuation);
  }

  return actuation;
}