ResultVector cluster(vector<LocalFeatures*> &locfeat, bool clusterWithPositions, string splitMode, uint maxSplit,
                     uint stopWithNClusters, string disturbMode, string poolMode, uint dontSplitBelow,
                     uint iterationsBetweenSplits, uint minObservationsPerCluster, double epsilon,
                     string distanceMaker, string saveModelTo, bool saveBeforeSplits, string loadModelFrom) {
  BaseClusterer *clusterer = new EM();
  if (loadModelFrom != "") {
    clusterer->loadModel(loadModelFrom);
  }
  setupClusterer(dynamic_cast<EM*>(clusterer), splitMode, maxSplit, stopWithNClusters, disturbMode, poolMode,
                 dontSplitBelow, iterationsBetweenSplits, minObservationsPerCluster, epsilon, distanceMaker,
                 saveBeforeSplits);
  ResultVector clusterinformation;
  if (clusterWithPositions) {
    DBG(10) << "clustering with position information included" << endl;
  }
  DoubleVectorVector localFeaturesData;
  for (uint j = 0; j < locfeat.size(); j++) {
    LocalFeatures* localFeatures = locfeat[j];
    int xSize = localFeatures->imageSizeX();
    int ySize = localFeatures->imageSizeY();
    for (uint i = 0; i < localFeatures->numberOfFeatures(); i++) {
      DoubleVector* lfvector = &((*localFeatures)[i]);
      if (clusterWithPositions) {
        pair<double, double> pos = localFeatures->relativePosition(i);
        lfvector->push_back((double) localFeatures->position(i).x / (double) xSize);
        lfvector->push_back((double) localFeatures->position(i).y / (double) ySize);
      }
      localFeaturesData.push_back(lfvector);
    }
  }
  if (saveModelTo != "") {
    dynamic_cast<EM*>(clusterer)->run(localFeaturesData, clusterinformation, saveModelTo);
    clusterer->saveModel(saveModelTo);
  } else {
    clusterer->run(localFeaturesData, clusterinformation);
  }
  delete clusterer;
  return clusterinformation;
}
double predict(size_t steps) {
  if (points_.empty()) {
    return 0;
  }
  if (points_.size() != nPoints_ - 1 + nDim_) {
    return points_.back();
  }
  for (size_t i = 0; i < nDim_; ++i) {
    for (size_t j = 0; j < nPoints_; ++j) {
      x_(i, j) = points_[i + j];
    }
  }
  auto c = x_ * x_.transpose() / nPoints_;
  SelfAdjointEigenSolver<MatrixXd> esC(c);
  auto v = esC.eigenvectors();
  // LOG(INFO) << OUTLN(esC.eigenvalues());
  // LOG(INFO) << OUTLN(v);
  for (size_t i = 0; i < nDim_ - 1; ++i) {
    for (size_t j = 0; j < nEigen_; ++j) {
      vStar_(i, j) = v(i, nDim_ - 1 - j);
    }
  }
  for (size_t i = 0; i < nEigen_; ++i) {
    vTau_(0, i) = v(nDim_ - 1, nDim_ - 1 - i);
  }
  auto predictionM = (vTau_ * vStar_.transpose()) / (1.0 - (vTau_ * vTau_.transpose())(0, 0));
  // cout << OUTLN(predictionM) << OUTLN(predictionM.sum());
  DoubleVector computed;
  auto getPoint = [&](size_t index) {
    if (index < points_.size()) {
      return points_[index];
    }
    return computed[index - points_.size()];
  };
  for (size_t iStep = 0; iStep < steps; ++iStep) {
    for (size_t i = 0; i < nDim_ - 1; ++i) {
      q_(i, 0) = getPoint((nPoints_ - 1 + nDim_) + i - (nDim_ - 1) + iStep);
    }
    computed.emplace_back((predictionM * q_)(0, 0));
  }
  return computed.back();
}
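The structure of predict() matches the recurrent forecast of singular spectrum analysis (SSA); reading it that way is an assumption, since the surrounding class is not shown. Under that reading, with $L$ = nDim_, $N$ = nPoints_, $r$ = nEigen_, and $P_1,\dots,P_r$ the leading eigenvectors of the lag-covariance matrix $C = XX^\top/N$, write $P_i = (\underline{P}_i, \pi_i)$ where $\pi_i$ is the last component. The forecast coefficients are

$$ R = \frac{1}{1-\nu^2}\sum_{i=1}^{r}\pi_i\,\underline{P}_i, \qquad \nu^2=\sum_{i=1}^{r}\pi_i^2, $$

which is what predictionM computes (vTau_ holds the $\pi_i$, vStar_ the $\underline{P}_i$), and each new point is $\hat{x}_{n+1}=R^\top(x_{n-L+2},\dots,x_n)^\top$, i.e. the product (predictionM * q_) in the loop.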
double _weightedSumOfLenSq(const RDGeom::Point3DConstPtrVect &points, const DoubleVector &weights) {
  PRECONDITION(points.size() == weights.size(), "");
  double res = 0.0;
  RDGeom::Point3DConstPtrVect_CI pti;
  const double *wData = weights.getData();
  unsigned int i = 0;
  for (pti = points.begin(); pti != points.end(); pti++) {
    res += (wData[i] * ((*pti)->lengthSq()));
    i++;
  }
  return res;
}
void Pattern::configure(ConfigurationParameters& params, QString prefix) {
  //--- get all parameters with the prefix 'cluster:'
  QStringList clusterList = params.getParametersWithPrefixList( prefix, "cluster:" );
  foreach( QString cluster, clusterList ) {
    QString id = cluster.split(':')[1];
    if ( id.isNull() || id.isEmpty() ) continue;
    //--- now check whether there are inputs and outputs parameters for this id and load them
    QString str = params.getValue( prefix + "inputs:" + id );
    DoubleVector inputs;
    if (!str.isNull()) {
      QStringList list = str.split(QRegExp("\\s+"), QString::SkipEmptyParts);
      for( int i=0; i<list.size(); i++) {
        inputs.append( list[i].toDouble() );
      }
    }
    str = params.getValue( prefix + "outputs:" + id );
    DoubleVector outputs;
    if (!str.isNull()) {
      QStringList list = str.split(QRegExp("\\s+"), QString::SkipEmptyParts);
      for( int i=0; i<list.size(); i++) {
        outputs.append( list[i].toDouble() );
      }
    }
    if ( inputs.size() == 0 && outputs.size() == 0 ) continue;
    Cluster* cl = params.getObjectFromParameter<Cluster>( prefix+cluster, false, true );
    if ( inputs.size() > 0 ) {
      setInputsOf( cl, inputs );
    }
    if ( outputs.size() > 0 ) {
      setOutputsOf( cl, outputs );
    }
  }
}
DoubleVector RowDoubleMatrix::conjugateGradient(const DoubleVector &B, double epsilon, unsigned int niter,
                                                bool printMessages, unsigned int messageStep) const {
    DoubleVector X(size_, 0.0); // initial guess: the zero vector
    DoubleVector resid(size_); // residual
    DoubleVector direction; // search direction
    DoubleVector temp(size_); // temporary storage for intermediate products
    double resid_norm; // residual norm
    double alpha;
    double beta;
    double resid_resid, resid_resid_new;
    residual(X, B, resid);
    direction = resid;
    resid_norm = resid.norm_2();
    if (printMessages)
        std::cout << "Initial residual: " << resid_norm << std::endl;
    if (resid_norm > epsilon) {
        resid_resid = resid * resid;
        for (unsigned int i = 0; i < niter; i++) {
            product(direction, temp);
            alpha = (resid_resid) / (direction * temp);
            X += alpha * direction;
            resid -= alpha * temp;
            resid_resid_new = resid * resid;
            resid_norm = sqrt(resid_resid_new);
            if (resid_norm <= epsilon) {
                if (printMessages)
                    std::cout << "Solution found. Iterations: " << i << ", residual: " << resid_norm << std::endl;
                break;
            }
            if (printMessages && (i % messageStep == 0))
                std::cout << i << ", residual: " << resid_norm << std::endl;
            beta = (resid_resid_new) / (resid_resid);
            // d = r + d*beta
            direction.scale(beta);
            direction += resid;
            resid_resid = resid_resid_new;
        }
    }
    return X;
}
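For reference, the loop above follows the standard conjugate-gradient recurrences for a symmetric positive-definite system $Ax=b$ (here product() applies $A$ and resid_resid carries $r_k^\top r_k$):

$$ \alpha_k=\frac{r_k^\top r_k}{d_k^\top A d_k},\quad x_{k+1}=x_k+\alpha_k d_k,\quad r_{k+1}=r_k-\alpha_k A d_k,\quad \beta_k=\frac{r_{k+1}^\top r_{k+1}}{r_k^\top r_k},\quad d_{k+1}=r_{k+1}+\beta_k d_k. $$

The assignment resid_resid = resid_resid_new at the end of the loop is what carries $r_k^\top r_k$ into the next iteration; without it, $\alpha$ and $\beta$ would be computed against a stale residual.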
void MainWindow::gradient() {
    DoubleVector x;
    x.push_back(-1.0);
    x.push_back(+1.2);
    SampleGradient gm;
    connect(&gm, SIGNAL(showCoordinares(const DoubleVector &)), this, SLOT(showCoordinares(const DoubleVector &)));
    gm.setEpsilon1(0.00001);
    gm.setEpsilon2(0.00001);
    gm.setEpsilon3(0.00001);
    gm.setR1MinimizeEpsilon(0.01, 0.00001);
    gm.calculate(x);
}
void LowessSmoothing::smoothData(const DoubleVector& input_x, const DoubleVector& input_y, DoubleVector& smoothed_output) {
  if (input_x.size() != input_y.size()) {
    throw Exception::InvalidValue(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                  "Sizes of x and y values not equal! Aborting... ", String(input_x.size()));
  }

  // unable to smooth over 2 or fewer data points (we need at least 3)
  if (input_x.size() <= 2) {
    smoothed_output = input_y;
    return;
  }

  Size input_size = input_y.size();
  // const Size q = floor( input_size * alpha );
  const Size q = (window_size_ < input_size) ? static_cast<Size>(window_size_) : input_size - 1;
  DoubleVector distances(input_size, 0.0);
  DoubleVector sortedDistances(input_size, 0.0);

  for (Size outer_idx = 0; outer_idx < input_size; ++outer_idx) {
    // Compute distances.
    for (Size inner_idx = 0; inner_idx < input_size; ++inner_idx) {
      distances[inner_idx] = std::fabs(input_x[outer_idx] - input_x[inner_idx]);
      sortedDistances[inner_idx] = distances[inner_idx];
    }

    // Sort distances in order from smallest to largest.
    std::sort(sortedDistances.begin(), sortedDistances.end());

    // Compute weights.
    std::vector<double> weigths(input_size, 0);
    for (Size inner_idx = 0; inner_idx < input_size; ++inner_idx) {
      weigths.at(inner_idx) = tricube_(distances[inner_idx], sortedDistances[q]);
    }

    // Calculate the weighted quadratic regression.
    Math::QuadraticRegression qr;
    std::vector<double>::const_iterator w_begin = weigths.begin();
    qr.computeRegressionWeighted(input_x.begin(), input_x.end(), input_y.begin(), w_begin);

    // Smooth y-values: evaluate the local fit at the current x.
    double rt = input_x[outer_idx];
    smoothed_output.push_back(qr.eval(rt));
  }
}
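tricube_(d, d_q) is not shown here; in standard LOWESS the weight for a point at distance $d$ from the query, with $d_q$ the distance to the $q$-th nearest neighbour, is presumably

$$ w(d)=\begin{cases}\bigl(1-(d/d_q)^3\bigr)^3, & d<d_q,\\ 0, & \text{otherwise},\end{cases} $$

so points outside the local window contribute nothing to the weighted quadratic regression.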
DoubleVector MSMSFragmentation::getLossMasses () {
    DoubleVector dv;
    dv.push_back ( 0.0 );
    for ( int i = 0 ; i < ionTypes.size () ; i++ ) {
        string it = ionTypes [i];
        if ( isPrefix ( it, "M+" ) || isPrefix ( it, "M-" ) ) {
            string loss = it.substr ( 1 );
            if ( loss.length () > 1 && isdigit ( loss [1] ) ) {
                dv.push_back ( atof ( loss.c_str () ) );
            }
        }
    }
    return dv;
}
RDGeom::Point3D _weightedSumOfPoints(const RDGeom::Point3DConstPtrVect &points, const DoubleVector &weights) {
  PRECONDITION(points.size() == weights.size(), "");
  RDGeom::Point3DConstPtrVect_CI pti;
  RDGeom::Point3D tmpPt, res;
  const double *wData = weights.getData();
  unsigned int i = 0;
  for (pti = points.begin(); pti != points.end(); pti++) {
    tmpPt = (*(*pti));
    tmpPt *= wData[i];
    res += tmpPt;
    i++;
  }
  return res;
}
void TimeWarp::calculateCausalChromaSimilarityMatrix(DoubleMatrix& firstChromaMatrix, DoubleMatrix& secondChromaMatrix, DoubleMatrix& simMatrix) {
    // Calculates the chroma-only similarity matrix.
    // Part of it may already have been computed, so this extends the existing matrix.
    int size = 0;
    if (simMatrix.size() > 0) {
        size = simMatrix[0].size();
    }
    if (secondChromaMatrix.size() > size) {
        for (int x = 0; x < firstChromaMatrix.size(); x++) {
            if (x < simMatrix.size()) {
                // i.e. entries already exist for this row
                for (int y = (int)simMatrix[x].size(); y < secondChromaMatrix.size(); y++) {
                    double distance;
                    if (useDotProduct)
                        distance = getChromaSimilarity(x, y, &firstChromaMatrix, &secondChromaMatrix);
                    else
                        distance = getEuclideanDistance(x, y, &firstChromaMatrix, &secondChromaMatrix);
                    printf("putting one back X %i Y %i dist %f \n", x, y, distance);
                    simMatrix[x].push_back(distance);
                }
            } else {
                DoubleVector d;
                for (int y = 0; y < secondChromaMatrix.size(); y++) {
                    double distance;
                    if (useDotProduct)
                        distance = getChromaSimilarity(x, y, &firstChromaMatrix, &secondChromaMatrix);
                    else
                        distance = getEuclideanDistance(x, y, &firstChromaMatrix, &secondChromaMatrix);
                    printf("new row X %i Y %i dist %f\n", x, y, distance);
                    d.push_back(distance);
                }
                simMatrix.push_back(d);
            }
        }
    }
    if (size > 0)
        printf("Causal CHROMA ONLY SIM SIZE %i x %i; ", (int)simMatrix.size(), (int)simMatrix[0].size());
    printf("First matrix SIZE %i , SECOND size %i\n", (int)firstChromaMatrix.size(), (int)secondChromaMatrix.size());
}
DoubleVector& get_cleaved_masses ( const string& protein, const IntVector& cleavageIndex ) {
    static DoubleVector cleavedMassArray ( 36000 );
    StringSizeType numAA = protein.length ();
    if ( numAA > cleavedMassArray.size () ) cleavedMassArray.resize ( numAA );
    for ( StringSizeType i = 0, j = 0 ; i < numAA ; ) {
        double mass = 0.0;
        StringSizeType index = cleavageIndex [j];
        while ( i <= index ) {
            mass += amino_acid_wt [protein[i++]];
        }
        cleavedMassArray [j++] = mass;
    }
    return cleavedMassArray;
}
void madara::knowledge::containers::FlexMap::to_container(DoubleVector& target) const {
  if (context_) {
    ContextGuard context_guard(*context_);
    MADARA_GUARD_TYPE guard(mutex_);

    // get all children
    KnowledgeBase knowledge;
    knowledge.facade_for(*context_);
    target.set_delimiter(delimiter_);
    target.set_name(name_, knowledge);
  }
}
void PlotFunction::drawVector(DoubleVector& energyVec, int minIndex, int maxIndex, const WindowRegion& window,
                              const double& maxNumberOfIndexPoints, const double& maxValue) {
    float screenHeight = window.height;
    float screenWidth = window.width;
    double numberOfIndexPoints = min(maxNumberOfIndexPoints, (double) maxIndex - minIndex);
    double indicesPerStep = (maxIndex - minIndex) / numberOfIndexPoints;
    double pixelsPerStep = window.width / numberOfIndexPoints;
    int i, j;
    double heightScalar = window.height / (1.1 * maxValue);
    int lastHeight = window.y + screenHeight - (energyVec[minIndex] * heightScalar);
    int newHeight;
    int xPosition;
    int lastXposition = window.x;
    double exactIndex;
    for (exactIndex = minIndex; exactIndex < maxIndex; exactIndex += indicesPerStep) {
        j = round(exactIndex);
        i = j - minIndex;
        if (j < energyVec.size()) {
            xPosition = window.x + i * pixelsPerStep;
            newHeight = window.y + screenHeight - (energyVec[j] * heightScalar);
            ofLine(lastXposition, lastHeight, xPosition, newHeight);
            lastHeight = newHeight;
            lastXposition = xPosition;
        }
    }
}
int RateGammaInvar::computePatternRates(DoubleVector &pattern_rates, IntVector &pattern_cat) {
    //cout << "Computing Gamma site rates by empirical Bayes..." << endl;
    phylo_tree->computePatternLhCat(WSL_RATECAT);
    int npattern = phylo_tree->aln->getNPattern();
    pattern_rates.resize(npattern);
    pattern_cat.resize(npattern);
    double *lh_cat = phylo_tree->_pattern_lh_cat;
    for (int i = 0; i < npattern; i++) {
        double sum_rate = 0.0, sum_lh = phylo_tree->ptn_invar[i];
        int best = 0;
        double best_lh = phylo_tree->ptn_invar[i];
        for (int c = 0; c < ncategory; c++) {
            sum_rate += rates[c] * lh_cat[c];
            sum_lh += lh_cat[c];
            if (lh_cat[c] > best_lh || (lh_cat[c] == best_lh && random_double() < 0.5)) {
                // break tie at random
                best = c + 1;
                best_lh = lh_cat[c];
            }
        }
        pattern_rates[i] = sum_rate / sum_lh;
        pattern_cat[i] = best;
        lh_cat += ncategory;
    }
    return ncategory + 1;
}
void StandardModel<Two_scale>::set(const DoubleVector& y) {
   int i, j, k = 0;
   for (i = 1; i <= 3; i++)
      for (j = 1; j <= 3; j++) {
         k++;
         yu(i, j) = y.display(k);
         yd(i, j) = y.display(k + 9);
         ye(i, j) = y.display(k + 18);
      }
   k = 27;
   for (i = 1; i <= 3; i++) {
      k++;
      g(i) = y.display(k);
   }
}
DoubleVector RRSchedule::reconfigureWeek(DoubleVector _week, Vector _permute)
// Creates a new week based on the current week and a permutation
{
    DoubleVector newWeek;
    for (Vector::const_iterator tSlotNum = _permute.begin(); tSlotNum != _permute.end(); ++tSlotNum)
    {
        // For week0, there are not enough timeslots
        if ((week0 and *tSlotNum < max_times - skip_first) or (not week0))
        {
            newWeek.push_back(_week[*tSlotNum]);
        }
    }
    return newWeek;
}
bool ThermochromicGlazing_Impl::setThickness(double value) {
  GlazingVector glazings = mf_glazings();
  DoubleVector rollbackValues;
  for (unsigned i = 0, n = glazings.size(); i < n; ++i) {
    rollbackValues.push_back(glazings[i].thickness());
    bool ok = glazings[i].setThickness(value);
    if (!ok) {
      // rollback previous values
      for (int j = i - 1; j >= 0; --j) {
        glazings[j].setThickness(rollbackValues[j]);
      }
      return false;
    }
  }
  return true;
}
void CosSimilarityList::printDelimited ( ostream& os, const DoubleVector& c ) const {
    for ( int i = 0 ; i < c.size () ; i++ ) {
        if ( c [i] == -100.0 )
            delimitedEmptyCell ( os );
        else
            delimitedCellSigFig ( os, c [i], 3 );
    }
}
void gaugegravityBcs1( MssmSoftsusy & m, const DoubleVector & inputParameters ) {
  double M_moduli_local, M_gauge_local, M_mess_local;
  M_moduli_local = inputParameters.display(1);

  double m1, m2, m3;
  m.setGaugeCoupling( 1, global_g1 );
  m.setGaugeCoupling( 2, global_g2 );
  m.setGaugeCoupling( 3, global_g3 );

  m1 = l1 * M_moduli_local;
  m2 = l2 * M_moduli_local;
  m3 = l3 * M_moduli_local;
  m.setGauginoMass( 1, m1 );
  m.setGauginoMass( 2, m2 );
  m.setGauginoMass( 3, m3 );

  double mqlsq, mllsq, mursq, mdrsq, mersq;
  double mhusq, mhdsq;
  double M_moduli_sqr_local;
  M_moduli_sqr_local = M_moduli_local * M_moduli_local;
  mqlsq = ( 1 - nQ ) * M_moduli_sqr_local;
  mllsq = ( 1 - nL ) * M_moduli_sqr_local;
  mursq = ( 1 - nU ) * M_moduli_sqr_local;
  mdrsq = ( 1 - nD ) * M_moduli_sqr_local;
  mersq = ( 1 - nE ) * M_moduli_sqr_local;
  mhusq = ( 1 - nHu) * M_moduli_sqr_local;
  mhdsq = ( 1 - nHd) * M_moduli_sqr_local;

  DoubleMatrix id(3, 3);
  id(1, 1) = 1.0; id(2, 2) = 1.0; id(3, 3) = 1.0;
  m.setSoftMassMatrix(mQl, mqlsq * id);
  m.setSoftMassMatrix(mUr, mursq * id);
  m.setSoftMassMatrix(mDr, mdrsq * id);
  m.setSoftMassMatrix(mLl, mllsq * id);
  m.setSoftMassMatrix(mEr, mersq * id);
  m.setMh1Squared(mhdsq);
  m.setMh2Squared(mhusq);

  double A_HuQU, A_HdQD, A_HdLE;
  A_HuQU = ( 3 - nHu - nQ - nU ) * M_moduli_local;
  A_HdQD = ( 3 - nHd - nQ - nD ) * M_moduli_local;
  A_HdLE = ( 3 - nHd - nL - nE ) * M_moduli_local;
  m.setTrilinearElement(UA, 1, 1, m.displayYukawaElement(YU, 1, 1) * A_HuQU);
  m.setTrilinearElement(UA, 2, 2, m.displayYukawaElement(YU, 2, 2) * A_HuQU);
  m.setTrilinearElement(UA, 3, 3, m.displayYukawaElement(YU, 3, 3) * A_HuQU);
  m.setTrilinearElement(DA, 1, 1, m.displayYukawaElement(YD, 1, 1) * A_HdQD);
  m.setTrilinearElement(DA, 2, 2, m.displayYukawaElement(YD, 2, 2) * A_HdQD);
  m.setTrilinearElement(DA, 3, 3, m.displayYukawaElement(YD, 3, 3) * A_HdQD);
  m.setTrilinearElement(EA, 1, 1, m.displayYukawaElement(YE, 1, 1) * A_HdLE);
  m.setTrilinearElement(EA, 2, 2, m.displayYukawaElement(YE, 2, 2) * A_HdLE);
  m.setTrilinearElement(EA, 3, 3, m.displayYukawaElement(YE, 3, 3) * A_HdLE);
}
void DataPartitions::doFrequency(DoubleVector dimData) {
    m_partitions.clear();
    PairVector dimOrData;
    initPairs(dimOrData, dimData);
    pairSort(dimOrData);
    int dimsize = dimData.size();
    int nBins = this->m_rddtClust->m_rddtOp->m_nBins;
    if (nBins == 0) {
        nBins = 1;
    }
    if (nBins > dimsize) {
        nBins = dimsize;
    }
    for (int i = 0; i < nBins; i++) {
        DataPartition* partition = new DataPartition();
        partition->setPartitions(this);
        partition->m_partitionIdx = i;
        this->m_partitions.push_back(partition);
    }
    int partitionsize = (int)ceil((double)dimsize / (double)nBins);
    for (unsigned i = 0; i < dimOrData.size(); i++) {
        int p = (int)((double)i / (double)partitionsize);
        // clamp to the last partition (the original check allowed p == size(), which indexes out of range)
        if (p >= (int)m_partitions.size())
            p = (int)m_partitions.size() - 1;
        int first = (dimOrData[i]).first;
        (m_partitions[p])->m_parIndices.push_back(first);
    }
}
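In other words, after sorting the values, observation ranks are mapped to bins of (nearly) equal population: with $n$ values and $k$ bins, the $i$-th smallest value (0-based) lands in partition

$$ p=\Bigl\lfloor i \big/ \lceil n/k \rceil \Bigr\rfloor, $$

clamped to the last bin when the division does not come out even, which is why the bound check above is needed.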
void CosSimilarityList::printHTML ( ostream& os, const DoubleVector& c, const string& styleID ) const {
    for ( int i = 0 ; i < c.size () ; i++ ) {
        if ( c [i] == -100.0 )
            tableEmptyCell ( os, styleID );
        else
            tableCellSigFig ( os, c [i], 3, false, styleID );
    }
}
int RateGamma::computePatternRates(DoubleVector &pattern_rates, IntVector &pattern_cat) {
    //cout << "Computing Gamma site rates by empirical Bayes..." << endl;
    phylo_tree->computePatternLhCat(WSL_RATECAT);
    int npattern = phylo_tree->aln->getNPattern();
    pattern_rates.resize(npattern);
    pattern_cat.resize(npattern);
    double *lh_cat = phylo_tree->_pattern_lh_cat;
    for (int i = 0; i < npattern; i++) {
        double sum_rate = 0.0, sum_lh = 0.0;
        int best = 0;
        for (int c = 0; c < ncategory; c++) {
            sum_rate += rates[c] * lh_cat[c];
            sum_lh += lh_cat[c];
            if (lh_cat[c] > lh_cat[best] || (lh_cat[c] == lh_cat[best] && random_double() < 0.5)) // break tie at random
                best = c;
        }
        pattern_rates[i] = sum_rate / sum_lh;
        pattern_cat[i] = best;
        lh_cat += ncategory;
    }
    return ncategory;

    // pattern_rates.clear();
    // pattern_rates.insert(pattern_rates.begin(), ptn_rates, ptn_rates + npattern);
    // pattern_cat.resize(npattern, 0);
    // for (int i = 0; i < npattern; i++)
    //     for (int j = 1; j < ncategory; j++)
    //         if (fabs(rates[j] - ptn_rates[i]) < fabs(rates[pattern_cat[i]] - ptn_rates[i]))
    //             pattern_cat[i] = j;
    // delete [] ptn_rates;
}
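The per-pattern rate computed in the loop is the likelihood-weighted mean of the category rates, with the highest-likelihood category reported (ties broken at random):

$$ \hat{r}_i=\frac{\sum_{c} r_c\,L_{ic}}{\sum_{c} L_{ic}}, $$

where $L_{ic}$ is the entry of _pattern_lh_cat for pattern $i$ and rate category $c$. The RateGammaInvar variant above differs only in seeding the sums with the invariant-site term ptn_invar[i].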
QVariant LinearFunction_Impl::toVariant() const {
  QVariantMap linearFunctionData = AnalysisObject_Impl::toVariant().toMap();
  linearFunctionData["function_type"] = QString("LinearFunction");
  QVariantList variablesList;
  DoubleVector coeffs = coefficients();
  int index(0), coeffsN(coeffs.size());
  Q_FOREACH(const Variable& var, variables()) {
    QVariantMap varMap = var.toVariant().toMap();
    varMap["variable_index"] = index;
    if (index < coeffsN) {
      varMap["coefficient"] = coeffs[index];
    }
    variablesList.push_back(QVariant(varMap));
    ++index;
  }
  // The original snippet ends after the loop; presumably the collected list is stored (key name assumed) and the map returned.
  linearFunctionData["variables"] = variablesList;
  return QVariant(linearFunctionData);
}
DoubleVectorVector QuanMSMSXMLData::getFormulaPurityCoefficients ( const string& name ) const {
    DoubleVectorVector dvv;
    StringVector f = getQuanMSMSInfo ( name ).formulae;
    DoubleVector m = getQuanMSMSInfo ( name ).masses;
    for ( int i = 0 ; i < f.size () ; i++ ) {
        DoubleVector dv;
        if ( f [i] != "" ) {
            IsotopePeakStats ips ( f [i], 1 );
            int index = ips.getProbabilityIndexForMass ( m [i] );
            if ( index >= 2 ) dv.push_back ( ips.getProbability ( index - 2 ) );
            else dv.push_back ( 0.0 );
            if ( index >= 1 ) dv.push_back ( ips.getProbability ( index - 1 ) );
            else dv.push_back ( 0.0 );
            dv.push_back ( ips.getProbability ( index + 1 ) );
            dv.push_back ( ips.getProbability ( index + 2 ) );
        }
        else { // No formulae so assume no correction
            for ( int j = 0 ; j < 4 ; j++ ) {
                dv.push_back ( 0.0 );
            }
        }
        dvv.push_back ( dv );
    }
    return dvv;
}
bool StepFunction::derivate( const DoubleVector& inputs, const DoubleVector&, DoubleVector& derivates ) const {
    //--- return the same derivative as a sigmoid with lambda = 1
    for( unsigned int i=0; i<inputs.size(); i++ ) {
        double y;
        y = 1.0/( 1.0 + std::exp( -inputs[i] ) );
        derivates[i] = y * ( 1.0 - y );
    }
    return true;
}
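As the comment says, the step function's derivative is replaced by that of a logistic sigmoid with $\lambda = 1$,

$$ y=\frac{1}{1+e^{-x}},\qquad \frac{dy}{dx}=y\,(1-y), $$

which keeps the unit usable in gradient-based training even though the true step derivative is zero almost everywhere.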
void StopRule::multiple (DoubleVector &vec1, DoubleMatrix &mat2, DoubleVector &proVec) {
    int row_, col_;
    proVec.resize(mat2[0].size());
    for (col_ = 0; col_ < mat2[0].size(); col_++) {
        proVec[col_] = 0.0;
        for (row_ = 0; row_ < mat2.size(); row_++)
            proVec[col_] += vec1[row_] * mat2[row_][col_];
    }
}
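The product computed here is a row vector times a matrix, $\text{proVec}_j=\sum_i \text{vec1}_i\,\text{mat2}_{ij}$, so vec1 is expected to have mat2.size() entries and the result has mat2[0].size() entries.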
void DataPartitions::initPairs(PairVector &dimOrData, DoubleVector dimData) {
    dimOrData.clear();
    for (unsigned i = 0; i < dimData.size(); i++) {
        int first = (int)i;
        double second = dimData[i];
        std::pair<int, double> temp(first, second);
        dimOrData.push_back(temp);
    }
}
bool TimeWarp::extendAlignmentUp(const int& endIndexY, DoubleMatrix *alignmentMatrix) {
    DoubleVector d;
    d = (*alignmentMatrix)[0];
    int heightSize = d.size();
    if (heightSize < endIndexY) {
        // then we haven't finished yet
        for (int i = 0; i < (*alignmentMatrix).size(); i++) {
            double value = getDistance(i, heightSize);
            value += getRestrictedMinimum(i, heightSize, value, 0, 0); // min values 0
            (*alignmentMatrix)[i].push_back(value);
        }
    }
    if ((*alignmentMatrix)[0].size() == endIndexY)
        return true;
    else
        return false;
}
void RddtClust::getDimData(DoubleVector& dimData, int idx) {
    using namespace Rcpp;
    XmdvToolMainWnd* m_mwd = this->m_rddtOp->m_pm->getMainWnd();
    RInside r = m_mwd->getRinstance();
    //qDebug()<<"idx qt "<<idx;
    r["currentDim"] = idx;
    std::string cmd =
        "currentDim <- as.integer(currentDim)+1;\n"
        //"print(currentDim);\n"
        "dimData <- M[,currentDim];\n"
        "dimData";
    SEXP evals = r.parseEval(cmd);
    NumericVector rDimData(evals);
    dimData.clear();
    for (int i = 0; i < rDimData.length(); i++) {
        dimData.push_back(rDimData[i]);
    }
}
// l labels the position the vector index goes in.
// After that, indices are cyclic.
Tensor outerProduct(const DoubleVector &V, const DoubleMatrix & M, int l) {
  Tensor temp;
  int i, j, k;
  for (i = 1; i <= 3; i++)
    for (j = 1; j <= 3; j++)
      for (k = 1; k <= 3; k++) {
        switch(l) {
        case 1: temp(i, j, k) = V.display(i) * M.display(j, k); break;
        case 2: temp(i, j, k) = V.display(j) * M.display(k, i); break;
        case 3: temp(i, j, k) = V.display(k) * M.display(i, j); break;
        default:
          ostringstream ii;
          ii << "Trying to outer product " << l << "th element of tensor";
          throw ii.str();
          break;
        }
      }
  return temp;
}
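Spelled out, the three cases place the vector index at position l and keep the remaining indices in cyclic order:

$$ T_{ijk}=\begin{cases} V_i\,M_{jk}, & l=1,\\ V_j\,M_{ki}, & l=2,\\ V_k\,M_{ij}, & l=3. \end{cases} $$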