size_t CoverTree<MetricType, StatisticType, MatType, RootPointPolicy>::
SortPointSet(arma::Col<size_t>& indices,
             arma::vec& distances,
             const size_t childFarSetSize,
             const size_t childUsedSetSize,
             const size_t farSetSize)
{
  // We'll use low-level memcpy calls ourselves, just to ensure it's done
  // quickly and the way we want it to be.  Unfortunately this takes up more
  // memory than one-element swaps, but there's not a great way around that.
  const size_t bufferSize = std::min(farSetSize, childUsedSetSize);
  const size_t bigCopySize = std::max(farSetSize, childUsedSetSize);

  // Sanity check: there is no need to sort if the buffer size is going to be
  // zero.
  if (bufferSize == 0)
    return (childFarSetSize + farSetSize);

  size_t* indicesBuffer = new size_t[bufferSize];
  ElemType* distancesBuffer = new ElemType[bufferSize];

  // The start of the memory region to copy to the buffer.
  const size_t bufferFromLocation = ((bufferSize == farSetSize) ?
      (childFarSetSize + childUsedSetSize) : childFarSetSize);
  // The start of the memory region to move directly to the new place.
  const size_t directFromLocation = ((bufferSize == farSetSize) ?
      childFarSetSize : (childFarSetSize + childUsedSetSize));
  // The destination to copy the buffer back to.
  const size_t bufferToLocation = ((bufferSize == farSetSize) ?
      childFarSetSize : (childFarSetSize + farSetSize));
  // The destination of the directly moved memory region.
  const size_t directToLocation = ((bufferSize == farSetSize) ?
      (childFarSetSize + farSetSize) : childFarSetSize);

  // Copy the smaller piece to the buffer.
  memcpy(indicesBuffer, indices.memptr() + bufferFromLocation,
      sizeof(size_t) * bufferSize);
  memcpy(distancesBuffer, distances.memptr() + bufferFromLocation,
      sizeof(ElemType) * bufferSize);

  // Now move the other memory.
  memmove(indices.memptr() + directToLocation,
      indices.memptr() + directFromLocation, sizeof(size_t) * bigCopySize);
  memmove(distances.memptr() + directToLocation,
      distances.memptr() + directFromLocation, sizeof(ElemType) * bigCopySize);

  // Now copy the temporary memory to the right place.
  memcpy(indices.memptr() + bufferToLocation, indicesBuffer,
      sizeof(size_t) * bufferSize);
  memcpy(distances.memptr() + bufferToLocation, distancesBuffer,
      sizeof(ElemType) * bufferSize);

  delete[] indicesBuffer;
  delete[] distancesBuffer;

  // This returns the complete size of the far set.
  return (childFarSetSize + farSetSize);
}
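// A minimal standalone sketch (not part of the tree code) of the three-step
// block swap SortPointSet performs: buffer the smaller region, shift the
// larger one with memmove, then copy the buffer back.  swapBlocks is a
// hypothetical helper name used purely for illustration.
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iostream>

void swapBlocks(size_t* data, const size_t sizeA, const size_t sizeB)
{
  // Given [ A | B ] in data, produce [ B | A ].
  const size_t bufferSize = std::min(sizeA, sizeB);
  const size_t bigSize = std::max(sizeA, sizeB);
  if (bufferSize == 0)
    return;

  size_t* buffer = new size_t[bufferSize];
  const bool aIsSmaller = (sizeA == bufferSize);

  // Copy the smaller block to the buffer.
  std::memcpy(buffer, data + (aIsSmaller ? 0 : sizeA),
      sizeof(size_t) * bufferSize);
  // Shift the larger block into its final position; the source and
  // destination may overlap, so memmove is required.
  std::memmove(data + (aIsSmaller ? 0 : bufferSize),
      data + (aIsSmaller ? sizeA : 0), sizeof(size_t) * bigSize);
  // Drop the buffered block into the remaining slot.
  std::memcpy(data + (aIsSmaller ? bigSize : 0), buffer,
      sizeof(size_t) * bufferSize);

  delete[] buffer;
}

int main()
{
  size_t data[] = { 1, 2, 3, 10, 20 }; // A = { 1, 2, 3 }, B = { 10, 20 }.
  swapBlocks(data, 3, 2);
  for (const size_t v : data)
    std::cout << v << ' '; // Prints: 10 20 1 2 3
}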
/**
 * WARNING: Only use this method if you know what you are doing!
 *
 * This deallocates any data currently in this matrix.  It then takes the
 * data from the given arguments; this transfers ownership of the data to
 * this class, clearing the data from the given arguments.  This permits
 * piecemeal construction of a CSC matrix without unnecessary reallocation.
 *
 * @param new_row_indices WARNING: These MUST be sorted in increasing order
 *     (for each column).
 */
void reset_nocopy(size_type m,
                  size_type n,
                  arma::Col<size_type>& new_col_offsets,
                  arma::Col<size_type>& new_row_indices,
                  arma::Col<value_type>& new_values)
{
  assert(new_col_offsets.size() == n + 1);
  assert(new_row_indices.size() == new_values.size());
  if (new_row_indices.size() != 0)
    assert(new_row_indices[new_row_indices.size() - 1] < m);
  // TODO: More validity checks.

  n_rows = m;
  n_cols = n;
  // TODO: Use Armadillo swap methods once implemented.
  std::swap(col_offsets_, new_col_offsets);
  std::swap(row_indices_, new_row_indices);
  std::swap(values_, new_values);
}
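// A hedged usage sketch of the zero-copy handoff above: build the three CSC
// arrays up front and swap them in.  The csc_matrix type name is a stand-in
// for whatever class declares reset_nocopy(), assuming size_type is
// arma::uword and value_type is double.
arma::Col<arma::uword> colOffsets = {0, 1, 2, 3}; // n + 1 entries.
arma::Col<arma::uword> rowIndices = {0, 1, 2};    // Sorted within each column.
arma::Col<double> values = {1.0, 1.0, 1.0};

csc_matrix matrix; // Hypothetical owning class.
matrix.reset_nocopy(3, 3, colOffsets, rowIndices, values);
// The local vectors now hold the matrix's previous (empty) buffers; the 3x3
// identity built above belongs to `matrix` without any reallocation.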
inline double ParallelKinematicMachine3PUPS::getEndEffectorPoseError(
    const arma::Col<double>::fixed<6>& endEffectorPose,
    const arma::Row<double>& redundantJointActuations) const {
  const arma::Cube<double>::fixed<3, 3, 2>& model = getModel(endEffectorPose, redundantJointActuations);

  const arma::Mat<double>::fixed<3, 3>& baseJoints = model.slice(0);
  const arma::Mat<double>::fixed<3, 3>& endEffectorJoints = model.slice(1);

  arma::Mat<double>::fixed<3, 3> endEffectorJointsRotated = endEffectorJoints;
  // The joint positions are 3-dimensional, so translate by the full
  // end-effector position (the first three pose elements).
  endEffectorJointsRotated.each_col() -= endEffectorPose.subvec(0, 2);

  const arma::Mat<double>::fixed<3, 3>& baseToEndEffectorJointPositions = endEffectorJoints - baseJoints;
  const arma::Row<double>::fixed<3>& baseToEndEffectorJointActuations = arma::sqrt(arma::sum(arma::square(baseToEndEffectorJointPositions)));

  if (arma::any(baseToEndEffectorJointActuations < minimalActiveJointActuations_) || arma::any(baseToEndEffectorJointActuations > maximalActiveJointActuations_)) {
    return 0.0;
  }

  arma::Mat<double>::fixed<6, 3> forwardKinematic;
  forwardKinematic.head_rows(3) = baseToEndEffectorJointPositions;
  for (std::size_t n = 0; n < baseToEndEffectorJointPositions.n_cols; ++n) {
    forwardKinematic.submat(3, n, 5, n) = arma::cross(endEffectorJointsRotated.col(n), baseToEndEffectorJointPositions.col(n));
  }

  arma::Mat<double> inverseKinematic(6, 3 + redundantJointIndicies_.n_elem, arma::fill::zeros);
  // Reuse the strut lengths computed above.
  inverseKinematic.diag() = -baseToEndEffectorJointActuations;
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; ++n) {
    const unsigned int& redundantJointIndex = redundantJointIndicies_(n);
    inverseKinematic(n, 3 + n) = arma::dot(baseToEndEffectorJointPositions.col(redundantJointIndex), redundantJointAngles_.col(redundantJointIndex));
  }

  return -1.0 / arma::cond(arma::solve(forwardKinematic.t(), inverseKinematic));
}
void permuteVector(const arma::Col<ValueType>& original,
                   arma::Col<ValueType>& permuted) const {
  const int dim = original.n_elem;
  permuted.set_size(dim);
  for (int i = 0; i < dim; ++i)
    permuted(m_permutedIndices[i]) = original(i);
}
inline double ParallelKinematicMachine3PRPR::getEndEffectorPoseError(
    const arma::Col<double>::fixed<3>& endEffectorPose,
    const arma::Row<double>& redundantJointActuations) const {
  const arma::Cube<double>::fixed<2, 3, 2>& model = getModel(endEffectorPose, redundantJointActuations);

  // Slice 0 holds the base joint positions; slice 1 holds the end-effector
  // joint positions.
  const arma::Mat<double>::fixed<2, 3>& baseJointPositions = model.slice(0);
  const arma::Mat<double>::fixed<2, 3>& endEffectorJointPositions = model.slice(1);

  arma::Mat<double>::fixed<2, 3> endEffectorJointPositionsRotated = endEffectorJointPositions;
  endEffectorJointPositionsRotated.each_col() -= endEffectorPose.subvec(0, 1);

  const arma::Mat<double>::fixed<2, 3>& baseToEndEffectorJointPositions = endEffectorJointPositions - baseJointPositions;
  const arma::Row<double>::fixed<3>& baseToEndEffectorJointActuations = arma::sqrt(arma::sum(arma::square(baseToEndEffectorJointPositions)));

  if (arma::any(baseToEndEffectorJointActuations < minimalActiveJointActuations_) || arma::any(baseToEndEffectorJointActuations > maximalActiveJointActuations_)) {
    return 0.0;
  }

  arma::Mat<double>::fixed<3, 3> forwardKinematic;
  forwardKinematic.head_rows(2) = baseToEndEffectorJointPositions;
  forwardKinematic.row(2) = -forwardKinematic.row(0) % endEffectorJointPositionsRotated.row(1) + forwardKinematic.row(1) % endEffectorJointPositionsRotated.row(0);

  arma::Mat<double> inverseKinematic(3, 3 + redundantJointIndicies_.n_elem, arma::fill::zeros);
  inverseKinematic.diag() = -baseToEndEffectorJointActuations;
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; ++n) {
    const unsigned int& redundantJointIndex = redundantJointIndicies_(n);
    inverseKinematic(n, 3 + n) = forwardKinematic(redundantJointIndex, 0) * redundantJointAngleCosines_(n) + forwardKinematic(redundantJointIndex, 1) * redundantJointAngleSines_(n);
  }

  return -1.0 / arma::cond(arma::solve(forwardKinematic.t(), inverseKinematic));
}
void unpermuteVector(const arma::Col<ValueType>& permuted,
                     arma::Col<ValueType>& original) const {
  const int dim = permuted.n_elem;
  original.set_size(dim);
  for (int i = 0; i < dim; ++i)
    original(i) = permuted(m_permutedIndices[i]);
}
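// A hedged sketch of the permute/unpermute round trip above on concrete
// values; `permutedIndices` stands in for the m_permutedIndices member and
// must be a permutation of 0..n-1.
#include <armadillo>

int main()
{
  const arma::Col<arma::uword> permutedIndices = {2, 0, 1};
  const arma::Col<double> original = {10.0, 20.0, 30.0};

  // Permute: element i of the input lands at position permutedIndices[i].
  arma::Col<double> permuted(original.n_elem);
  for (arma::uword i = 0; i < original.n_elem; ++i)
    permuted(permutedIndices(i)) = original(i); // permuted = {20, 30, 10}.

  // Unpermute: read back through the same index map to invert it.
  arma::Col<double> restored(permuted.n_elem);
  for (arma::uword i = 0; i < permuted.n_elem; ++i)
    restored(i) = permuted(permutedIndices(i)); // restored == original.
}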
double PolyharmonicSplineRadialBasisFunction::getPolynomialTailValueImplementation(
    const arma::Col<double>& parameter,
    const arma::Col<double>& polynomialCoefficients) const {
  if (polynomialOrder_ > 1) {
    // Linear term plus constant term.
    return arma::dot(polynomialCoefficients.head(numberOfDimensions_), parameter) + polynomialCoefficients(polynomialCoefficients.n_elem - 1);
  } else {
    // Constant term only.
    return polynomialCoefficients(polynomialCoefficients.n_elem - 1);
  }
}
inline void HRectBound<MetricType, ElemType>::Center(
    arma::Col<ElemType>& center) const
{
  // Set size correctly if necessary.
  if (center.n_elem != dim)
    center.set_size(dim);

  for (size_t i = 0; i < dim; i++)
    center(i) = bounds[i].Mid();
}
void OptimisationProblem<T>::setParameterTranslation(
    const arma::Col<T>& parameterTranslation) {
  verify(parameterTranslation.n_elem == numberOfDimensions_, "The number of elements must be equal to the number of dimensions.");
  verify(parameterTranslation.is_finite(), "All elements must be finite.");

  parameterTranslation_ = parameterTranslation;

  // Reset all counters and caches, as the problem may have changed.
  reset();
}
Teuchos::RCP<Thyra::DefaultSpmdVector<ValueType> > wrapInTrilinosVector(
    arma::Col<ValueType>& col)
{
  const size_t size = col.n_rows;
  Teuchos::ArrayRCP<ValueType> trilinosArray =
      Teuchos::arcp(col.memptr(), 0 /* lowerOffset */, size,
                    false /* doesn't own memory */);
  typedef Thyra::DefaultSpmdVector<ValueType> TrilinosVector;
  return Teuchos::RCP<TrilinosVector>(new TrilinosVector(
      Thyra::defaultSpmdVectorSpace<ValueType>(size),
      trilinosArray, 1 /* stride */));
}
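// Hedged usage sketch for the wrapper above, assuming the same Trilinos
// headers as the snippet: the Thyra vector shares the column's memory
// (arcp(..., false) does not take ownership), so `col` must outlive the
// returned vector.
arma::Col<double> col(10, arma::fill::randu);
Teuchos::RCP<Thyra::DefaultSpmdVector<double> > vec = wrapInTrilinosVector(col);
// Writing through `vec` writes into `col`; no copy is made.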
double HMM<Distribution>::Predict(const arma::mat& dataSeq,
                                  arma::Col<size_t>& stateSeq) const
{
  // This is an implementation of the Viterbi algorithm for finding the most
  // probable sequence of states to produce the observed data sequence.  We
  // work in log-space throughout to avoid underflow, and the log-likelihood
  // of the best sequence falls out at the end.
  stateSeq.set_size(dataSeq.n_cols);
  arma::mat logStateProb(transition.n_rows, dataSeq.n_cols);
  arma::mat stateSeqBack(transition.n_rows, dataSeq.n_cols);

  // Store the logs of the transposed transition matrix.  This is because we
  // will be using the rows of the transition matrix.
  arma::mat logTrans(log(trans(transition)));

  // The calculation of the first state is slightly different; the probability
  // of the first state being state j is the maximum probability that the
  // state came to be j from another state.
  logStateProb.col(0).zeros();
  for (size_t state = 0; state < transition.n_rows; state++)
  {
    logStateProb(state, 0) = log(initial[state] *
        emission[state].Probability(dataSeq.unsafe_col(0)));
    stateSeqBack(state, 0) = state;
  }

  // Holds the index of the most probable previous state.
  arma::uword index;
  for (size_t t = 1; t < dataSeq.n_cols; t++)
  {
    // Assemble the state probability for this element.  Given that we are in
    // state j, we use the state with the highest probability of being the
    // previous state.
    for (size_t j = 0; j < transition.n_rows; j++)
    {
      arma::vec prob = logStateProb.col(t - 1) + logTrans.col(j);
      logStateProb(j, t) = prob.max(index) +
          log(emission[j].Probability(dataSeq.unsafe_col(t)));
      stateSeqBack(j, t) = index;
    }
  }

  // Backtrack to find the most probable state sequence.
  logStateProb.unsafe_col(dataSeq.n_cols - 1).max(index);
  stateSeq[dataSeq.n_cols - 1] = index;
  for (size_t t = 2; t <= dataSeq.n_cols; t++)
    stateSeq[dataSeq.n_cols - t] =
        stateSeqBack(stateSeq[dataSeq.n_cols - t + 1], dataSeq.n_cols - t + 1);

  return logStateProb(stateSeq(dataSeq.n_cols - 1), dataSeq.n_cols - 1);
}
void NaiveBayesClassifier<MatType>::Classify(const MatType& data,
                                             arma::Col<size_t>& results)
{
  // Check that the number of features in the test data is the same as in the
  // training data.
  Log::Assert(data.n_rows == means.n_rows);

  arma::vec probs = arma::log(probabilities);
  arma::mat invVar = 1.0 / variances;

  arma::mat testProbs = arma::repmat(probs.t(), data.n_cols, 1);

  results.set_size(data.n_cols); // No need to fill with anything yet.

  Log::Info << "Running Naive Bayes classifier on " << data.n_cols
      << " data points with " << data.n_rows << " features each." << std::endl;

  // Calculate the joint probability of each data point under each class.
  // Loop over every class.
  for (size_t i = 0; i < means.n_cols; i++)
  {
    // This is an adaptation of gmm::phi() for the case where the covariance
    // is a diagonal matrix.
    arma::mat diffs = data - arma::repmat(means.col(i), 1, data.n_cols);
    arma::mat rhs = -0.5 * arma::diagmat(invVar.col(i)) * diffs;
    arma::vec exponents(diffs.n_cols);
    for (size_t j = 0; j < diffs.n_cols; ++j)
      exponents(j) = std::exp(arma::accu(diffs.col(j) % rhs.unsafe_col(j)));

    testProbs.col(i) += log(pow(2 * M_PI, (double) data.n_rows / -2.0) *
        pow(det(arma::diagmat(invVar.col(i))), -0.5) * exponents);
  }

  // Now calculate the label.
  for (size_t i = 0; i < data.n_cols; ++i)
  {
    // Find the index of the class with maximum probability for this point.
    arma::uword maxIndex = 0;
    arma::vec pointProbs = testProbs.row(i).t();
    pointProbs.max(maxIndex);

    results[i] = maxIndex;
  }
}
bool CmpParser::readData(arma::Col<double>& data, short code)
{
  const unsigned int dim = hdr.sampSize / 4;
  data.resize(dim);
  if (code == 0)
  {
    // Read one big-endian float at a time; a stack variable avoids leaking
    // heap memory when a read fails.
    float p;
    for (unsigned int i = 0; i < dim; i++)
    {
      if (!file.read((char*) &p, sizeof(float)))
        return false;
      SwapInt32((int*) &p);
      data[i] = p;
    }
  }
  return true;
}
bool CmpParser::getNextFrame(arma::Col<double>& frame, int derivate)
{
  if (!file.is_open())
    return false;
  if (hdr.sampKind & 1024) // Compressed mode.  TODO: to be implemented.
  {
    std::cerr << "HTK compressed mode is not yet supported" << std::endl;
    return false;
  }
  else
  {
    const unsigned int dim = hdr.sampSize / 4;
    arma::Col<double> data;
    if (!this->readData(data))
      return false;
    // On the first call, prime the two-frame history needed for the
    // derivatives.
    if (prev1.n_rows != data.n_rows)
    {
      prev2 = data;
      if (!this->readData(prev1))
        return false;
      if (!this->readData(data))
        return false;
    }
    if (derivate == 2)
    {
      // Static coefficients, deltas (centered difference) and accelerations.
      frame.resize(3 * dim);
      frame.subvec(0, dim - 1) = prev1;
      frame.subvec(dim, 2 * dim - 1) = (data - prev2) / 2;
      frame.subvec(2 * dim, 3 * dim - 1) = prev2 - 2 * prev1 + data;
    }
    else if (derivate == 1)
    {
      // Static coefficients and deltas only.
      frame.resize(2 * dim);
      frame.subvec(0, dim - 1) = prev1;
      frame.subvec(dim, 2 * dim - 1) = (data - prev2) / 2;
    }
    else
    {
      frame.resize(dim);
      frame = data;
    }
    prev2 = prev1;
    prev1 = data;
    return true;
  }
}
// This function is specific to a single problem.
void calculateDependentVariables(
    const arma::Mat<std::complex<double> >& myOffsets,
    const arma::Col<std::complex<double> >& myCurrentGuess,
    arma::Col<std::complex<double> >& targetsCalculated)
{
  // Evaluate a dependent variable for each iteration.
  // The arma::Col allows this to be expressed as a vector operation.
  for (int i = 0; i < NUMDIMENSIONS; i++)
  {
    targetsCalculated[i] = arma::sum(pow(myCurrentGuess.subvec(0, 1) -
        myOffsets.row(i).subvec(0, 1).t(), 2.0));
    targetsCalculated[i] = targetsCalculated[i] +
        myCurrentGuess[2] * pow(-1.0, i) - myOffsets.row(i)[2];
  }
}
void HMM<Distribution>::Generate(const size_t length,
                                 arma::mat& dataSequence,
                                 arma::Col<size_t>& stateSequence,
                                 const size_t startState) const
{
  // Set vectors to the right size.
  stateSequence.set_size(length);
  dataSequence.set_size(dimensionality, length);

  // Set start state (default is 0).
  stateSequence[0] = startState;

  // Choose first emission state.
  double randValue = math::Random();

  // We just have to find where our random value sits in the probability
  // distribution of emissions for our starting state.
  dataSequence.col(0) = emission[startState].Random();

  // Now choose the states and emissions for the rest of the sequence.
  for (size_t t = 1; t < length; t++)
  {
    // First choose the hidden state.
    randValue = math::Random();

    // Now find where our random value sits in the probability distribution
    // of state changes.
    double probSum = 0;
    for (size_t st = 0; st < transition.n_rows; st++)
    {
      probSum += transition(st, stateSequence[t - 1]);
      if (randValue <= probSum)
      {
        stateSequence[t] = st;
        break;
      }
    }

    // Now choose the emission.
    dataSequence.col(t) = emission[stateSequence[t]].Random();
  }
}
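// A hedged sketch of the cumulative-sum (inverse CDF) draw used above to
// pick the next hidden state, isolated as a free function for clarity;
// `weights` is assumed to be a probability distribution summing to 1.
#include <armadillo>

arma::uword sampleIndex(const arma::vec& weights, const double randValue)
{
  // Walk the running sum until it passes the uniform draw.
  double probSum = 0.0;
  for (arma::uword i = 0; i < weights.n_elem; ++i)
  {
    probSum += weights(i);
    if (randValue <= probSum)
      return i;
  }
  return weights.n_elem - 1; // Guard against floating-point rounding.
}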
inline arma::Cube<double>::fixed<2, 3, 2> ParallelKinematicMachine3PRPR::getModel(
    const arma::Col<double>::fixed<3>& endEffectorPose,
    const arma::Row<double>& redundantJointActuations) const {
  verify(arma::all(arma::vectorise(redundantJointActuations >= 0)) && arma::all(arma::vectorise(redundantJointActuations <= 1)), "All values for the actuation of redundant joints must be between [0, 1].");
  verify(redundantJointActuations.n_elem == redundantJointIndicies_.n_elem, "The number of redundant actuations must be equal to the number of redundant joints.");

  arma::Cube<double>::fixed<2, 3, 2> model;

  const arma::Col<double>::fixed<2>& endEffectorPosition = endEffectorPose.subvec(0, 1);
  const double& endEffectorAngle = endEffectorPose(2);

  model.slice(0) = redundantJointStartPositions_;
  for (std::size_t n = 0; n < redundantJointIndicies_.n_elem; n++) {
    const unsigned int& redundantJointIndex = redundantJointIndicies_(n);
    model.slice(0).col(redundantJointIndex) += redundantJointActuations(redundantJointIndex) * redundantJointStartToEndPositions_.col(redundantJointIndex);
  }

  model.slice(1) = get2DRotation(endEffectorAngle) * endEffectorJointPositions_;
  model.slice(1).each_col() += endEffectorPosition;

  return model;
}
void GMM<FittingType>::Classify(const arma::mat& observations,
                                arma::Col<size_t>& labels) const
{
  // This is not the best way to do this!  We should not have to fill this
  // with values, because each one should be overwritten.
  labels.set_size(observations.n_cols);
  for (size_t i = 0; i < observations.n_cols; ++i)
  {
    // Find maximum probability component.
    double probability = 0;
    for (size_t j = 0; j < gaussians; ++j)
    {
      double newProb = Probability(observations.unsafe_col(i), j);
      if (newProb >= probability)
      {
        probability = newProb;
        labels[i] = j;
      }
    }
  }
}
double PellegMooreKMeans<MetricType, MatType>::Iterate(
    const arma::mat& centroids,
    arma::mat& newCentroids,
    arma::Col<size_t>& counts)
{
  newCentroids.zeros(centroids.n_rows, centroids.n_cols);
  counts.zeros(centroids.n_cols);

  // Create rules object.
  typedef PellegMooreKMeansRules<MetricType, TreeType> RulesType;
  RulesType rules(dataset, centroids, newCentroids, counts, metric);

  // Use single-tree traverser.
  typename TreeType::template SingleTreeTraverser<RulesType> traverser(rules);

  // Now, do a traversal with a fake query index (the query index is
  // irrelevant; we are checking each node against all clusters).
  traverser.Traverse(0, *tree);

  distanceCalculations += rules.DistanceCalculations();

  // Now, calculate how far the clusters moved, after normalizing them.
  double residual = 0.0;
  for (size_t c = 0; c < centroids.n_cols; ++c)
  {
    if (counts[c] > 0)
    {
      newCentroids.col(c) /= counts(c);
      residual += std::pow(metric.Evaluate(centroids.col(c),
          newCentroids.col(c)), 2.0);
    }
  }
  distanceCalculations += centroids.n_cols;

  return std::sqrt(residual);
}
inline void CoverTree<MetricType, StatisticType, MatType, RootPointPolicy>::
CreateChildren(arma::Col<size_t>& indices,
               arma::vec& distances,
               size_t nearSetSize,
               size_t& farSetSize,
               size_t& usedSetSize)
{
  // Determine the next scale level.  This should be the first level where
  // there are any points in the far set.  So, if we know the maximum distance
  // in the distances array, this will be the largest i such that
  //   maxDistance > pow(base, i)
  // and using this for the scale factor should guarantee we are not creating
  // an implicit node.  If the maximum distance is 0, every point in the near
  // set will be created as a leaf, and a child to this node.  We also do not
  // need to change the furthestChildDistance or furthestDescendantDistance.
  const ElemType maxDistance = max(distances.rows(0,
      nearSetSize + farSetSize - 1));
  if (maxDistance == 0)
  {
    // Make the self child at the lowest possible level.
    // This should not modify farSetSize or usedSetSize.
    size_t tempSize = 0;
    children.push_back(new CoverTree(*dataset, base, point, INT_MIN, this, 0,
        indices, distances, 0, tempSize, usedSetSize, *metric));
    distanceComps += children.back()->DistanceComps();

    // Every point in the near set should be a leaf.
    for (size_t i = 0; i < nearSetSize; ++i)
    {
      // farSetSize and usedSetSize will not be modified.
      children.push_back(new CoverTree(*dataset, base, indices[i], INT_MIN,
          this, distances[i], indices, distances, 0, tempSize, usedSetSize,
          *metric));
      distanceComps += children.back()->DistanceComps();
      usedSetSize++;
    }

    // The number of descendants is just the number of children, because each
    // of them are leaves and contain one point.
    numDescendants = children.size();

    // Re-sort the dataset.  We have
    // [ used | far | other used ]
    // and we want
    // [ far | all used ].
    SortPointSet(indices, distances, 0, usedSetSize, farSetSize);

    return;
  }

  const int nextScale = std::min(scale,
      (int) ceil(log(maxDistance) / log(base))) - 1;
  const ElemType bound = pow(base, nextScale);

  // First, make the self child.  We must split the given near set into the
  // near set and far set for the self child.
  size_t childNearSetSize =
      SplitNearFar(indices, distances, bound, nearSetSize);

  // Build the self child (recursively).
  size_t childFarSetSize = nearSetSize - childNearSetSize;
  size_t childUsedSetSize = 0;
  children.push_back(new CoverTree(*dataset, base, point, nextScale, this, 0,
      indices, distances, childNearSetSize, childFarSetSize, childUsedSetSize,
      *metric));

  // Don't double-count the self-child (so, subtract one).
  numDescendants += children[0]->NumDescendants();

  // The self-child can't modify the furthestChildDistance away from 0, but it
  // can modify the furthestDescendantDistance.
  furthestDescendantDistance = children[0]->FurthestDescendantDistance();

  // Remove any implicit nodes we may have created.
  RemoveNewImplicitNodes();

  distanceComps += children[0]->DistanceComps();

  // Now the arrays, in memory, look like this:
  // [ childFar | childUsed | far | used ]
  // but we need to move the used points past our far set:
  // [ childFar | far | childUsed + used ]
  // and keeping in mind that childFar = our near set,
  // [ near | far | childUsed + used ]
  // is what we are trying to make.
  SortPointSet(indices, distances, childFarSetSize, childUsedSetSize,
      farSetSize);

  // Update size of near set and used set.
  nearSetSize -= childUsedSetSize;
  usedSetSize += childUsedSetSize;

  // Now for each point in the near set, we need to make children.  To save
  // computation later, we'll create an array holding the points in the near
  // set, and then after each run we'll check which of those (if any) were
  // used and we will remove them.  ...if that's faster.  I think it is.
  while (nearSetSize > 0)
  {
    size_t newPointIndex = nearSetSize - 1;

    // Swap to front if necessary.
    if (newPointIndex != 0)
    {
      const size_t tempIndex = indices[newPointIndex];
      const ElemType tempDist = distances[newPointIndex];

      indices[newPointIndex] = indices[0];
      distances[newPointIndex] = distances[0];

      indices[0] = tempIndex;
      distances[0] = tempDist;
    }

    // Will this be a new furthest child?
    if (distances[0] > furthestDescendantDistance)
      furthestDescendantDistance = distances[0];

    // If there's only one point left, we don't need this crap.
    if ((nearSetSize == 1) && (farSetSize == 0))
    {
      size_t childNearSetSize = 0;
      children.push_back(new CoverTree(*dataset, base, indices[0], nextScale,
          this, distances[0], indices, distances, childNearSetSize, farSetSize,
          usedSetSize, *metric));
      distanceComps += children.back()->DistanceComps();
      numDescendants += children.back()->NumDescendants();

      // Because the far set size is 0, we don't have to do any swapping to
      // move the point into the used set.
      ++usedSetSize;
      --nearSetSize;

      // And we're done.
      break;
    }

    // Create the near and far set indices and distance vectors.  We don't
    // fill in the self-point, yet.
    arma::Col<size_t> childIndices(nearSetSize + farSetSize);
    childIndices.rows(0, (nearSetSize + farSetSize - 2)) =
        indices.rows(1, nearSetSize + farSetSize - 1);
    arma::vec childDistances(nearSetSize + farSetSize);

    // Build distances for the child.
    ComputeDistances(indices[0], childIndices, childDistances,
        nearSetSize + farSetSize - 1);

    // Split into near and far sets for this point.
    childNearSetSize = SplitNearFar(childIndices, childDistances, bound,
        nearSetSize + farSetSize - 1);
    childFarSetSize = PruneFarSet(childIndices, childDistances, base * bound,
        childNearSetSize, (nearSetSize + farSetSize - 1));

    // Now that we know the near and far set sizes, we can put the used point
    // (the self point) in the correct place; now, when we call
    // MoveToUsedSet(), it will move the self-point correctly.  The distance
    // does not matter.
    childIndices(childNearSetSize + childFarSetSize) = indices[0];
    childDistances(childNearSetSize + childFarSetSize) = 0;

    // Build this child (recursively).
    childUsedSetSize = 1; // Mark self point as used.
    children.push_back(new CoverTree(*dataset, base, indices[0], nextScale,
        this, distances[0], childIndices, childDistances, childNearSetSize,
        childFarSetSize, childUsedSetSize, *metric));
    numDescendants += children.back()->NumDescendants();

    // Remove any implicit nodes.
    RemoveNewImplicitNodes();

    distanceComps += children.back()->DistanceComps();

    // Now with the child created, it returns the childIndices and
    // childDistances vectors in this form:
    // [ childFar | childUsed ]
    // For each point in the childUsed set, we must move that point to the
    // used set in our own vector.
    MoveToUsedSet(indices, distances, nearSetSize, farSetSize, usedSetSize,
        childIndices, childFarSetSize, childUsedSetSize);
  }

  // Calculate furthest descendant.
  for (size_t i = (nearSetSize + farSetSize);
       i < (nearSetSize + farSetSize + usedSetSize); ++i)
    if (distances[i] > furthestDescendantDistance)
      furthestDescendantDistance = distances[i];
}
SCENARIO("bbob::WeierstrassFunction.objectiveFunction_", "[bbob::WeierstrassFunction][bbob::WeierstrassFunction.objectiveFunction_]") {
  GIVEN("Some parameters") {
    // All expected objective values were generated for a 40-dimensional
    // problem instance.
    const arma::uword numberOfDimensions = 40;
    CAPTURE(numberOfDimensions);

    TestWeierstrassFunction optimisationProblem(numberOfDimensions);
    REQUIRE(optimisationProblem.rotationR_.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_rotationMatrix_40x40_1.input"));
    REQUIRE(optimisationProblem.rotationQ_.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_rotationMatrix_40x40_2.input"));

    arma::Mat<double> parameters;
    REQUIRE(parameters.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_parameters_40x100.input"));

    THEN("Return their objective value") {
      arma::Col<double> expectedObjectiveValues;
      REQUIRE(expectedObjectiveValues.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/weierstrassFunction_dim40_1x100.expected"));

      for (arma::uword n = 0; n < parameters.n_cols; ++n) {
        CHECK(optimisationProblem.objectiveFunction_(parameters.col(n)) == Approx(expectedObjectiveValues(n)));
      }
    }
  }
}

SCENARIO("bbob::WeierstrassFunction.getNormalisedObjectiveValue", "[bbob::WeierstrassFunction][bbob::WeierstrassFunction.getNormalisedObjectiveValue]") {
  GIVEN("A parameter") {
    const arma::uword numberOfDimensions = SYNCHRONISED(1 + discreteRandomNumber());
    CAPTURE(numberOfDimensions);
    const arma::Col<double>& parameter = arma::normalise(continuousRandomNumbers(numberOfDimensions));
arma::Col<T> OptimisationProblem<T>::getDiversifiedParameter(
    const arma::Col<T>& parameter) const noexcept {
  assert(parameter.n_elem == numberOfDimensions_);

  return parameterRotation_ * (parameterScaling_ % parameter.elem(parameterPermutation_) - parameterTranslation_);
}
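// A hedged sketch of the diversification pipeline above on concrete values:
// permute, scale element-wise, translate, then rotate.
const arma::Col<double> parameter = {1.0, 2.0};
const arma::uvec permutation = {1, 0};
const arma::Col<double> scaling = {2.0, 3.0};
const arma::Col<double> translation = {0.5, 0.5};
const arma::Mat<double> rotation = arma::eye(2, 2);
// rotation * (scaling % parameter.elem(permutation) - translation)
//   = { 2*2 - 0.5, 3*1 - 0.5 } = { 3.5, 2.5 }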
arma::Col<double> polynomial(
    const arma::Col<double>& parameter,
    const arma::uword polynomialOrder) {
  arma::Col<double> polynomial(polynomialSize(parameter.n_elem, polynomialOrder));

  // For any polynomial, the constant term is 1.
  switch (polynomialOrder) {
    case 0: {
      // Constant polynomials
      // Constant term
      polynomial(0) = 1.0;
    } break;
    case 1: {
      // Linear polynomials
      // Linear term
      polynomial.head(parameter.n_elem) = parameter;
      // Constant term
      polynomial(parameter.n_elem) = 1;
    } break;
    case 2: {
      // Quadratic polynomials
      // Quadratic term
      arma::uword n = 0;
      for (arma::uword k = 0; k < parameter.n_elem; ++k) {
        for (arma::uword l = k; l < parameter.n_elem; ++l) {
          polynomial(n++) = parameter(k) * parameter(l);
        }
      }
      // Linear term
      polynomial.subvec(n, n + parameter.n_elem - 1) = parameter;
      // Constant term
      polynomial(polynomial.n_elem - 1) = 1;
    } break;
    case 3: {
      // Cubic polynomials
      // Cubic term
      arma::uword n = 0;
      for (arma::uword k = 0; k < parameter.n_elem; ++k) {
        for (arma::uword l = k; l < parameter.n_elem; ++l) {
          for (arma::uword m = l; m < parameter.n_elem; ++m) {
            polynomial(n++) = parameter(k) * parameter(l) * parameter(m);
          }
        }
      }
      // Quadratic term
      for (arma::uword k = 0; k < parameter.n_elem; ++k) {
        for (arma::uword l = k; l < parameter.n_elem; ++l) {
          polynomial(n++) = parameter(k) * parameter(l);
        }
      }
      // Linear term
      polynomial.subvec(n, n + parameter.n_elem - 1) = parameter;
      // Constant term
      polynomial(polynomial.n_elem - 1) = 1;
    } break;
    default: {
      // Polynomials of degree >= 4
      // All terms, except the linear and constant ones.
      arma::uword n = 0;
      // Generate the terms for all degrees > 1.
      for (arma::uword d = 2; d <= polynomialOrder; ++d) {
        for (const auto& multicombination : multicombinations(parameter.n_elem, d)) {
          polynomial(n++) = arma::prod(parameter.elem(multicombination));
        }
      }
      // Linear term
      polynomial.subvec(n, n + parameter.n_elem - 1) = parameter;
      // Constant term
      polynomial(polynomial.n_elem - 1) = 1;
    } break;
  }

  return polynomial;
}
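// A hedged usage sketch for the quadratic case above: with a 2-dimensional
// parameter (x, y), the monomials are laid out as
// { x*x, x*y, y*y, x, y, 1 }, so polynomialSize(2, 2) must return 6.
const arma::Col<double> parameter = {2.0, 3.0};
const arma::Col<double> terms = polynomial(parameter, 2);
// terms = { 4.0, 6.0, 9.0, 2.0, 3.0, 1.0 }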
bool SpillTree<MetricType, StatisticType, MatType, HyperplaneType, SplitType>::
SplitPoints(const double tau,
            const double rho,
            const arma::Col<size_t>& points,
            arma::Col<size_t>& leftPoints,
            arma::Col<size_t>& rightPoints)
{
  arma::vec projections(points.n_elem);
  size_t left = 0, right = 0, leftFrontier = 0, rightFrontier = 0;

  // Count the number of points to the left/right of the splitting
  // hyperplane.
  for (size_t i = 0; i < points.n_elem; i++)
  {
    // Store projection value for future use.
    projections[i] = hyperplane.Project(dataset->col(points[i]));
    if (projections[i] <= 0)
    {
      left++;
      if (projections[i] > -tau)
        leftFrontier++;
    }
    else
    {
      right++;
      if (projections[i] < tau)
        rightFrontier++;
    }
  }

  const double p1 = double(left + rightFrontier) / points.n_elem;
  const double p2 = double(right + leftFrontier) / points.n_elem;

  if ((p1 <= rho || rightFrontier == 0) && (p2 <= rho || leftFrontier == 0))
  {
    // Perform the actual splitting considering the overlapping buffer.
    // Points with projection value in the range (-tau, tau) are included in
    // both leftPoints and rightPoints.
    leftPoints.resize(left + rightFrontier);
    rightPoints.resize(right + leftFrontier);
    for (size_t i = 0, rc = 0, lc = 0; i < points.n_elem; i++)
    {
      if (projections[i] < tau || projections[i] <= 0)
        leftPoints[lc++] = points[i];
      if (projections[i] > -tau)
        rightPoints[rc++] = points[i];
    }
    // Return true, because it is an overlapping node.
    return true;
  }

  // Perform the actual splitting ignoring the overlapping buffer.  Points
  // with projection value less than or equal to zero are included in
  // leftPoints, and points with projection value greater than zero are
  // included in rightPoints.
  leftPoints.resize(left);
  rightPoints.resize(right);
  for (size_t i = 0, rc = 0, lc = 0; i < points.n_elem; i++)
  {
    if (projections[i] <= 0)
      leftPoints[lc++] = points[i];
    else
      rightPoints[rc++] = points[i];
  }
  // Return false, because it isn't an overlapping node.
  return false;
}
double DualTreeKMeans<MetricType, MatType, TreeType>::Iterate(
    const arma::mat& centroids,
    arma::mat& newCentroids,
    arma::Col<size_t>& counts)
{
  newCentroids.zeros(centroids.n_rows, centroids.n_cols);
  counts.zeros(centroids.n_cols);
  if (clusterDistances.n_elem != centroids.n_cols + 1)
  {
    clusterDistances.set_size(centroids.n_cols + 1);
    clusterDistances.fill(DBL_MAX / 2.0); // To prevent overflow.
  }

  // Build a tree on the centroids.
  std::vector<size_t> oldFromNewCentroids;
  TreeType* centroidTree = BuildTree<TreeType>(
      const_cast<typename TreeType::Mat&>(centroids), oldFromNewCentroids);

  // Now run the dual-tree algorithm.
  typedef DualTreeKMeansRules<MetricType, TreeType> RulesType;
  RulesType rules(dataset, centroids, newCentroids, counts,
      oldFromNewCentroids, iteration, clusterDistances, distances,
      assignments, distanceIteration, metric);

  // Use the dual-tree traverser.
  //typename TreeType::template DualTreeTraverser<RulesType> traverser(rules);
  typename TreeType::template BreadthFirstDualTreeTraverser<RulesType>
      traverser(rules);

  traverser.Traverse(*centroidTree, *tree);

  distanceCalculations += rules.DistanceCalculations();

  // Now, calculate how far the clusters moved, after normalizing them.
  double residual = 0.0;
  clusterDistances.zeros();
  for (size_t c = 0; c < centroids.n_cols; ++c)
  {
    if (counts[c] == 0)
    {
      newCentroids.col(c).fill(DBL_MAX); // Should have happened anyway I think.
    }
    else
    {
      const size_t oldCluster = oldFromNewCentroids[c];
      newCentroids.col(oldCluster) /= counts(oldCluster);
      const double dist = metric.Evaluate(centroids.col(c),
          newCentroids.col(oldCluster));
      if (dist > clusterDistances[centroids.n_cols])
        clusterDistances[centroids.n_cols] = dist;
      clusterDistances[oldCluster] = dist;

      residual += std::pow(dist, 2.0);
    }
  }
  Log::Info << clusterDistances.t();

  delete centroidTree;

  ++iteration;
  return std::sqrt(residual);
}
  // Increases the visibility of the internal objective function, to bypass
  // general modifications made by the parent class.
  using mant::bbob::RosenbrockFunctionRotated::objectiveFunction_;
};

SCENARIO("bbob::RosenbrockFunctionRotated.objectiveFunction_", "[bbob::RosenbrockFunctionRotated][bbob::RosenbrockFunctionRotated.objectiveFunction_]") {
  GIVEN("Some parameters") {
    // All expected objective values were generated for a 40-dimensional
    // problem instance.
    const arma::uword numberOfDimensions = 40;
    CAPTURE(numberOfDimensions);

    TestRosenbrockFunctionRotated optimisationProblem(numberOfDimensions);

    arma::Mat<double> parameters;
    REQUIRE(parameters.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_parameters_40x100.input"));

    THEN("Return their objective value") {
      arma::Col<double> expectedObjectiveValues;
      REQUIRE(expectedObjectiveValues.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/rosenbrockFunctionRotated_dim40_1x100.expected"));

      for (arma::uword n = 0; n < parameters.n_cols; ++n) {
        CHECK(optimisationProblem.objectiveFunction_(parameters.col(n)) == Approx(expectedObjectiveValues(n)));
      }
    }
  }
}

SCENARIO("bbob::RosenbrockFunctionRotated.getNormalisedObjectiveValue", "[bbob::RosenbrockFunctionRotated][bbob::RosenbrockFunctionRotated.getNormalisedObjectiveValue]") {
  GIVEN("A parameter") {
    const arma::uword numberOfDimensions = SYNCHRONISED(1 + discreteRandomNumber());
    CAPTURE(numberOfDimensions);
    const arma::Col<double>& parameter = arma::normalise(continuousRandomNumbers(numberOfDimensions));
// Evaluate the function at the point "point" and store the result in the
// array "result".  Note that the source is located inside the inner sphere.
inline void evaluate(const arma::Col<CoordinateType>& point,
                     arma::Col<ValueType>& result) const {
  result.fill(0.);
}
};

SCENARIO("bbob::SchaffersF7FunctionIllConditioned.objectiveFunction_", "[bbob::SchaffersF7FunctionIllConditioned][bbob::SchaffersF7FunctionIllConditioned.objectiveFunction_]") {
  GIVEN("Some parameters") {
    // All expected objective values were generated for a 40-dimensional
    // problem instance.
    const arma::uword numberOfDimensions = 40;
    CAPTURE(numberOfDimensions);

    TestSchaffersF7FunctionIllConditioned optimisationProblem(numberOfDimensions);
    REQUIRE(optimisationProblem.rotationQ_.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_rotationMatrix_40x40_2.input"));

    arma::Mat<double> parameters;
    REQUIRE(parameters.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_parameters_40x100.input"));

    THEN("Return their objective value") {
      arma::Col<double> expectedObjectiveValues;
      REQUIRE(expectedObjectiveValues.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/schaffersF7FunctionIllConditioned_dim40_1x100.expected"));

      for (arma::uword n = 0; n < parameters.n_cols; ++n) {
        CHECK(optimisationProblem.objectiveFunction_(parameters.col(n)) == Approx(expectedObjectiveValues(n)));
      }
    }
  }
}

SCENARIO("bbob::SchaffersF7FunctionIllConditioned.getNormalisedObjectiveValue", "[bbob::SchaffersF7FunctionIllConditioned][bbob::SchaffersF7FunctionIllConditioned.getNormalisedObjectiveValue]") {
  GIVEN("A parameter") {
    const arma::uword numberOfDimensions = SYNCHRONISED(1 + discreteRandomNumber());
    CAPTURE(numberOfDimensions);
    const arma::Col<double>& parameter = arma::normalise(continuousRandomNumbers(numberOfDimensions));
TestWeierstrassFunction weierstrassFunction(40);

SECTION(".getObjectiveValueImplementation") {
  SECTION("Returns the objective value") {
    arma::Mat<double> rotationR;
    REQUIRE(rotationR.load(testDirectory + "/data/optimisationProblem/blackBoxOptimisationBenchmark/_rotationMatrix_40x40_1.input"));
    weierstrassFunction.setRotationR(rotationR);

    arma::Mat<double> rotationQ;
    REQUIRE(rotationQ.load(testDirectory + "/data/optimisationProblem/blackBoxOptimisationBenchmark/_rotationMatrix_40x40_2.input"));
    weierstrassFunction.setRotationQ(rotationQ);

    arma::Mat<double> parameters;
    REQUIRE(parameters.load(testDirectory + "/data/optimisationProblem/blackBoxOptimisationBenchmark/_parameters_40x100.input"));

    arma::Col<double> expectedObjectiveValues;
    REQUIRE(expectedObjectiveValues.load(testDirectory + "/data/optimisationProblem/blackBoxOptimisationBenchmark/bbob_weierstrassFunction_dim40_1x100.expected"));

    for (arma::uword n = 0; n < parameters.n_cols; ++n) {
      CHECK(weierstrassFunction.getObjectiveValueImplementation(parameters.col(n)) == Approx(expectedObjectiveValues(n)));
    }
  }
}

SECTION(".toString") {
  SECTION("Returns a (filesystem friendly) name for the class.") {
    CHECK(weierstrassFunction.toString() == "bbob_weierstrass_function");
  }
}
}
SCENARIO("bbob::GallaghersGaussian101mePeaksFunction.objectiveFunction_", "[bbob::GallaghersGaussian101mePeaksFunction][bbob::GallaghersGaussian101mePeaksFunction.objectiveFunction_]") {
  GIVEN("Some parameters") {
    // All expected objective values were generated for a 40-dimensional
    // problem instance.
    const arma::uword numberOfDimensions = 40;
    CAPTURE(numberOfDimensions);

    TestGallaghersGaussian101mePeaksFunction optimisationProblem(numberOfDimensions);
    REQUIRE(optimisationProblem.rotationQ_.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_rotationMatrix_40x40_1.input"));
    REQUIRE(optimisationProblem.localParameterConditionings_.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_localParameterConditionings_40x101.input"));
    REQUIRE(optimisationProblem.localParameterTranslations_.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_localParameterTranslations_40x101.input"));

    arma::Mat<double> parameters;
    REQUIRE(parameters.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/_parameters_40x100.input"));

    THEN("Return their objective value") {
      arma::Col<double> expectedObjectiveValues;
      REQUIRE(expectedObjectiveValues.load(::rootTestDataDirectory + "/optimisationProblem/benchmarkOptimisationProblem/blackBoxOptimisationBenchmark/gallaghersGaussian101mePeaksFunction_dim40_1x100.expected"));

      for (arma::uword n = 0; n < parameters.n_cols; ++n) {
        CHECK(optimisationProblem.objectiveFunction_(parameters.col(n)) == Approx(expectedObjectiveValues(n)));
      }
    }
  }
}

SCENARIO("bbob::GallaghersGaussian101mePeaksFunction.getNormalisedObjectiveValue", "[bbob::GallaghersGaussian101mePeaksFunction][bbob::GallaghersGaussian101mePeaksFunction.getNormalisedObjectiveValue]") {
  GIVEN("A parameter") {
    const arma::uword numberOfDimensions = SYNCHRONISED(1 + discreteRandomNumber());
    CAPTURE(numberOfDimensions);
    const arma::Col<double>& parameter = arma::normalise(continuousRandomNumbers(numberOfDimensions));