// сегментирование графа с учетом весов void DisjointSetForest::segmentGraph( int no_of_vertices, int no_of_edges, vector<Edge>& edges, float c ) { init( no_of_vertices ); sort( edges.begin(), edges.end(), []( Edge& a, Edge& b){ return a.weight < b.weight; }); vector<float> thresholds( no_of_vertices, c ); for( Edge& edge: edges ){ int a = this->find( edge.a ); int b = this->find( edge.b ); if( a != b ) { if( edge.weight <= thresholds[a] && edge.weight <= thresholds[b] ) { this->join( a, b ); a = this->find( a ); thresholds[a] = edge.weight + c / this->size( a ); } } } }
// Multi-level Otsu thresholding: returns (classes - 1) thresholds that split
// `histogram` into `classes` segments.  The heavy lifting is delegated to
// buildTables() (precomputed lookup table H) and the recursive exhaustive
// search for_loop(), which maximises the between-class criterion.
inline QVector<int> otsu(QVector<int> histogram, int classes)
{
  // Precomputed table used by the search.
  QVector<qreal> lookup = buildTables(histogram);

  // Segment boundaries: boundaries[0] and boundaries[classes] are pinned to
  // the first and last histogram bin; the interior entries are searched for.
  QVector<int> boundaries(classes + 1);
  boundaries[0] = 0;
  boundaries[boundaries.size() - 1] = histogram.size() - 1;

  // Best thresholds found so far (filled in by for_loop).
  QVector<int> result(classes - 1, 0);
  qreal bestSum = 0.;

  for_loop(&bestSum, &result, lookup, 1, histogram.size() - classes + 1, 1,
           histogram.size(), &boundaries);

  return result;
}
// Greedy local tuning of the pruning coefficients (in-place in `pr`):
// repeatedly pick the index that currently dominates the single-enumeration
// cost and pull its coefficient down towards its left neighbour; keep the
// step only if the overall target function drops by at least 0.5%.
// Each index has a small retry budget (`thresholds`), and the loop stops
// after 10 consecutive failed attempts or when b[0] would need reducing.
void Pruner<FT>::optimize_coefficients_local_adjust_decr_single(/*io*/ vector<double> &pr)
{
  int maxi, lasti, consecutive_fails;
  double improved_ratio, current_max = 0.0;
  FT old_cf, old_cfs, new_cf, old_b;
  vector<double> detailed_cost(n);   // per-index cost breakdown from single_enum_cost
  vector<double> slices(n, 10.0);    // (b[i+1] - b[i])/slice will be used as step
  vector<int> thresholds(n, 3);      // per-index budget of failed reduction attempts
  vec b(n);

  load_coefficients(b, pr);

  lasti = -1;             // last failed index, make sure we do not try it again in
                          // the next time
  consecutive_fails = 0;  // number of consecutive failes; break if
                          // reaches it
  improved_ratio = 0.995; // if reduced by 0.995, descent

  while (1)
  {
    // old cost
    old_cf = target_function(b);

    // find bottleneck index
    old_cfs = single_enum_cost(b, &(detailed_cost));

// heuristic: stop when the single-enum cost is already tiny compared to the
// overall target (only when the balance heuristic is compiled in)
#ifdef BALANCE_HEURISTIC_PRUNER_OPTIMIZE
    if (old_cfs < sqrt(old_cf) / 10.0)
      break;
#endif

    // Pick the index with the largest detailed cost, skipping the index that
    // failed last round and any index whose retry budget is exhausted.
    // NOTE(review): detailed_cost is indexed by i while thresholds/b use the
    // mirrored index n - i - 1 — presumably the cost vector is stored in
    // reverse order relative to b; preserved as-is.
    current_max = 0.0;
    maxi = 0;
    for (int i = 0; i < n; i++)
    {
      if ((i != (n - lasti - 1)) && (thresholds[n - i - 1] > 0))
      {
        if (detailed_cost[i] > current_max)
        {
          current_max = detailed_cost[i];
          maxi = i;
        }
      }
    }

    // b[ind] is the one to be reduced
    int ind = n - maxi - 1;
    old_b = b[ind];
    if (ind != 0)
    {
      // step towards the left neighbour by (gap / slices[ind])
      b[ind] = b[ind] - (b[ind] - b[ind - 1]) / slices[ind];
    }
    else
    {
      // b[0] has no left neighbour to step towards -- stop
      break;
    }

    // new cost
    new_cf = target_function(b);

    // if not improved -- recover
    if (new_cf >= (old_cf * improved_ratio))
    {
      b[ind] = old_b;           // roll back the step
      lasti = ind;              // don't retry this index next round
      thresholds[lasti]--;      // burn one retry of its budget
      consecutive_fails++;
    }
    else
    {
      // cerr << " improved from " << old_cf << " to " << new_cf << endl;
      // success: take finer steps at this index next time (capped at 1024)
      if (slices[ind] < 1024)
        slices[ind] = slices[ind] * 1.05;
      consecutive_fails = 0;
    }

    // quit after 10 consecutive failes
    if (consecutive_fails > 10)
    {
      break;
    }
  }

#ifdef DEBUG_PRUNER_OPTIMIZE_TC
  cerr << "# [TuningCost]" << endl;
  cerr << b << endl;
  cerr << "# [TuningCost] all_enum_cost = " << repeated_enum_cost(b) << endl;
  cerr << "# [TuningCost] succ_probability = " << measure_metric(b) << endl;
#endif

  save_coefficients(pr, b);
}
void VJCascadeClassifier::run(const string& dataFileName, const string& shypFileName, int numIterations, const string& outResFileName ) { // loading data InputData* pData = loadInputData(dataFileName, shypFileName); const int numOfExamples = pData->getNumExamples(); //get the index of positive label const NameMap& namemap = pData->getClassMap(); _positiveLabelIndex = namemap.getIdxFromName( _positiveLabelName ); if (_verbose > 0) cout << "Loading strong hypothesis..." << flush; // The class that loads the weak hypotheses UnSerialization us; // Where to put the weak hypotheses vector<vector<BaseLearner*> > weakHypotheses; // For stagewise thresholds vector<AlphaReal> thresholds(0); // loads them //us.loadHypotheses(shypFileName, weakHypotheses, pData); us.loadCascadeHypotheses(shypFileName, weakHypotheses, thresholds, pData); // store result vector<CascadeOutputInformation> cascadeData(0); vector<CascadeOutputInformation>::iterator it; cascadeData.resize(numOfExamples); for( it=cascadeData.begin(); it != cascadeData.end(); ++it ) { it->active=true; } if (!_outputInfoFile.empty()) { outputHeader(); } for(int stagei=0; stagei < weakHypotheses.size(); ++stagei ) { // for posteriors vector<AlphaReal> posteriors(0); // calculate the posteriors after stage VJCascadeLearner::calculatePosteriors( pData, weakHypotheses[stagei], posteriors, _positiveLabelIndex ); // update the data (posteriors, active element index etc.) 
updateCascadeData(pData, weakHypotheses, stagei, posteriors, thresholds, _positiveLabelIndex, cascadeData); if (!_outputInfoFile.empty()) { _output << stagei + 1 << "\t"; _output << weakHypotheses[stagei].size() << "\t"; outputCascadeResult( pData, cascadeData ); } int numberOfActiveInstance = 0; for( int i = 0; i < numOfExamples; ++i ) if (cascadeData[i].active) numberOfActiveInstance++; if (_verbose > 0 ) cout << "Number of active instances: " << numberOfActiveInstance << "(" << numOfExamples << ")" << endl; } vector<vector<int> > confMatrix(2); confMatrix[0].resize(2); fill( confMatrix[0].begin(), confMatrix[0].end(), 0 ); confMatrix[1].resize(2); fill( confMatrix[1].begin(), confMatrix[1].end(), 0 ); // print accuracy for(int i=0; i<numOfExamples; ++i ) { vector<Label>& labels = pData->getLabels(i); if (labels[_positiveLabelIndex].y>0) // pos label if (cascadeData[i].forecast==1) confMatrix[1][1]++; else confMatrix[1][0]++; else // negative label if (cascadeData[i].forecast==0) confMatrix[0][0]++; else confMatrix[0][1]++; } double acc = 100.0 * (confMatrix[0][0] + confMatrix[1][1]) / ((double) numOfExamples); // output it cout << endl; cout << "Error Summary" << endl; cout << "=============" << endl; cout << "Accuracy: " << setprecision(4) << acc << endl; cout << setw(10) << "\t" << setw(10) << namemap.getNameFromIdx(1-_positiveLabelIndex) << setw(10) << namemap.getNameFromIdx(_positiveLabelIndex) << endl; cout << setw(10) << namemap.getNameFromIdx(1-_positiveLabelIndex) << setw(10) << confMatrix[0][0] << setw(10) << confMatrix[0][1] << endl; cout << setw(10) << namemap.getNameFromIdx(_positiveLabelIndex) << setw(10) << confMatrix[1][0] << setw(10) << confMatrix[1][1] << endl; // output forecast if (!outResFileName.empty() ) outputForecast(pData, outResFileName, cascadeData ); // free memory allocation vector<vector<BaseLearner*> >::iterator bvIt; for( bvIt = weakHypotheses.begin(); bvIt != weakHypotheses.end(); ++bvIt ) { vector<BaseLearner* >::iterator bIt; 
for( bIt = (*bvIt).begin(); bIt != (*bvIt).end(); ++bIt ) delete *bIt; } }
void VJCascadeClassifier::savePosteriors(const string& dataFileName, const string& shypFileName, const string& outFileName, int numIterations) { // loading data InputData* pData = loadInputData(dataFileName, shypFileName); const int numOfExamples = pData->getNumExamples(); //get the index of positive label const NameMap& namemap = pData->getClassMap(); _positiveLabelIndex = namemap.getIdxFromName( _positiveLabelName ); if (_verbose > 0) cout << "Loading strong hypothesis..." << flush; // open outfile ofstream outRes(outFileName.c_str()); if (!outRes.is_open()) { cout << "Cannot open outfile!!! " << outFileName << endl; } // The class that loads the weak hypotheses UnSerialization us; // Where to put the weak hypotheses vector<vector<BaseLearner*> > weakHypotheses; // For stagewise thresholds vector<AlphaReal> thresholds(0); // loads them //us.loadHypotheses(shypFileName, weakHypotheses, pData); us.loadCascadeHypotheses(shypFileName, weakHypotheses, thresholds, pData); // output the number of stages outRes << "StageNum " << weakHypotheses.size() << endl; // output original labels outRes << "Labels"; for(int i=0; i<numOfExamples; ++i ) { vector<Label>& labels = pData->getLabels(i); if (labels[_positiveLabelIndex].y>0) // pos label outRes << " 1"; else outRes << " 0"; } outRes << endl; // store result vector<CascadeOutputInformation> cascadeData(0); vector<CascadeOutputInformation>::iterator it; cascadeData.resize(numOfExamples); for( it=cascadeData.begin(); it != cascadeData.end(); ++it ) { it->active=true; } for(int stagei=0; stagei < weakHypotheses.size(); ++stagei ) { // for posteriors vector<AlphaReal> posteriors(0); // calculate the posteriors after stage VJCascadeLearner::calculatePosteriors( pData, weakHypotheses[stagei], posteriors, _positiveLabelIndex ); // update the data (posteriors, active element index etc.) 
//VJCascadeLearner::forecastOverAllCascade( pData, posteriors, activeInstances, thresholds[stagei] ); updateCascadeData(pData, weakHypotheses, stagei, posteriors, thresholds, _positiveLabelIndex, cascadeData); int numberOfActiveInstance = 0; for( int i = 0; i < numOfExamples; ++i ) if (cascadeData[i].active) numberOfActiveInstance++; if (_verbose > 0 ) cout << "Number of active instances: " << numberOfActiveInstance << "(" << numOfExamples << ")" << endl; // output stats outRes << "Stage " << stagei << " " << weakHypotheses[stagei].size() << endl; outRes << "Forecast"; for(int i=0; i<numOfExamples; ++i ) { outRes << " " << cascadeData[i].forecast; } outRes << endl; outRes << "Active"; for(int i=0; i<numOfExamples; ++i ) { if( cascadeData[i].active) outRes << " 1"; else outRes << " 0"; } outRes << endl; outRes << "Posteriors"; for(int i=0; i<numOfExamples; ++i ) { outRes << " " << cascadeData[i].score; } outRes << endl; } outRes.close(); // free memory allocation vector<vector<BaseLearner*> >::iterator bvIt; for( bvIt = weakHypotheses.begin(); bvIt != weakHypotheses.end(); ++bvIt ) { vector<BaseLearner* >::iterator bIt; for( bIt = (*bvIt).begin(); bIt != (*bvIt).end(); ++bIt ) delete *bIt; } }
// Applies threshold tests, include/exclude ROI checks, and optional
// mask-based cropping to a single streamline.  Always returns true (the
// queue protocol never drops items); a rejected streamline is passed on as
// an empty `out`.  With `inverse` set, the accept/reject decision is
// flipped: rejected streamlines are emitted and accepted ones suppressed.
bool Worker::operator() (Streamline<>& in, Streamline<>& out) const
{
  out.clear();
  out.index = in.index;
  out.weight = in.weight;

  if (!thresholds (in)) {
    // Want to test thresholds before wasting time on resampling
    if (inverse)
      in.swap (out);   // failed thresholds + -inverse => keep the streamline
    return true;
  }

  // Assign to ROIs
  if (properties.include.size() || properties.exclude.size()) {
    include_visited.assign (properties.include.size(), false);
    if (ends_only) {
      // Only the two endpoints are tested against the ROIs
      for (size_t i = 0; i != 2; ++i) {
        const Eigen::Vector3f& p (i ? in.back() : in.front());
        properties.include.contains (p, include_visited);
        if (properties.exclude.contains (p)) {
          // Touched an exclude region => rejected (unless -inverse)
          if (inverse) in.swap (out);
          return true;
        }
      }
    } else {
      // Every vertex is tested against the ROIs
      for (const auto& p : in) {
        properties.include.contains (p, include_visited);
        if (properties.exclude.contains (p)) {
          if (inverse) in.swap (out);
          return true;
        }
      }
    }
    // Make sure all of the include regions were visited
    for (const auto& i : include_visited) {
      if (!i) {
        // Missed an include region => rejected (unless -inverse)
        if (inverse) in.swap (out);
        return true;
      }
    }
  }

  if (properties.mask.size()) {
    // Split tck into separate tracks based on the mask
    vector<vector<Eigen::Vector3f>> cropped_tracks;
    vector<Eigen::Vector3f> temp;
    for (const auto& p : in) {
      const bool contains = properties.mask.contains (p);
      // With -inverse, vertices INSIDE the mask are the ones cropped away
      if (contains == inverse) {
        // Segment boundary; keep only segments of at least 2 vertices
        if (temp.size() >= 2) cropped_tracks.push_back (temp);
        temp.clear();
      } else {
        temp.push_back (p);
      }
    }
    if (temp.size() >= 2) cropped_tracks.push_back (temp);

    if (cropped_tracks.empty()) return true;   // nothing survived cropping

    if (cropped_tracks.size() == 1) {
      // Single surviving segment: emit it directly
      cropped_tracks[0].swap (out);
      return true;
    }

    // Stitch back together in preparation for sending down queue as a single
    // track; segments are delimited by NaN vertices
    out.push_back ({ NaN, NaN, NaN });
    for (const auto& i : cropped_tracks) {
      for (const auto& p : i) out.push_back (p);
      out.push_back ({ NaN, NaN, NaN });
    }
    return true;

  } else {
    // No mask: the streamline passed all criteria => emit (unless -inverse)
    if (!inverse) in.swap (out);
    return true;
  }
}
// Older Point<float>-based variant of the streamline filter: applies
// threshold tests, include/exclude ROI checks and optional mask-based
// cropping, with the track upsampled before ROI mapping and downsampled
// again before output.  Always returns true (queue protocol); a rejected
// streamline is passed on as an empty `out`.  With `inverse` set, the
// accept/reject decision is flipped.
bool Worker::operator() (const Tractography::Streamline<>& in, Tractography::Streamline<>& out) const
{
  out.clear();
  out.index = in.index;
  out.weight = in.weight;

  if (!thresholds (in)) {
    // Want to test thresholds before wasting time on upsampling; but if -inverse is set,
    // still need to apply both the upsampler and downsampler before writing to output
    if (inverse) {
      std::vector< Point<float> > tck (in);
      upsampler (tck);
      downsampler (tck);
      tck.swap (out);
    }
    return true;
  }

  // Upsample track before mapping to ROIs
  std::vector< Point<float> > tck (in);
  upsampler (tck);

  // Assign to ROIs
  if (properties.include.size() || properties.exclude.size()) {
    include_visited.assign (properties.include.size(), false);
    for (std::vector< Point<float> >::const_iterator p = tck.begin(); p != tck.end(); ++p) {
      properties.include.contains (*p, include_visited);
      if (properties.exclude.contains (*p)) {
        // Touched an exclude region => rejected (unless -inverse, in which
        // case the track is downsampled back and emitted)
        if (inverse) {
          downsampler (tck);
          tck.swap (out);
        }
        return true;
      }
    }
    // Make sure all of the include regions were visited
    for (std::vector<bool>::const_iterator i = include_visited.begin(); i != include_visited.end(); ++i) {
      if (!*i) {
        // Missed an include region => rejected (unless -inverse)
        if (inverse) {
          downsampler (tck);
          tck.swap (out);
        }
        return true;
      }
    }
  }

  if (properties.mask.size()) {
    // Split tck into separate tracks based on the mask
    std::vector< std::vector< Point<float> > > cropped_tracks;
    std::vector< Point<float> > temp;
    for (std::vector< Point<float> >::const_iterator p = tck.begin(); p != tck.end(); ++p) {
      const bool contains = properties.mask.contains (*p);
      // With -inverse, vertices INSIDE the mask are the ones cropped away
      if (contains == inverse) {
        // Segment boundary; keep only segments of at least 2 vertices
        if (temp.size() >= 2) cropped_tracks.push_back (temp);
        temp.clear();
      } else {
        temp.push_back (*p);
      }
    }
    if (temp.size() >= 2) cropped_tracks.push_back (temp);

    if (cropped_tracks.empty()) return true;   // nothing survived cropping

    // Apply downsampler independently to each
    for (std::vector< std::vector< Point<float> > >::iterator i = cropped_tracks.begin(); i != cropped_tracks.end(); ++i)
      downsampler (*i);

    if (cropped_tracks.size() == 1) {
      // Single surviving segment: emit it directly
      cropped_tracks[0].swap (out);
      return true;
    }

    // Stitch back together in preparation for sending down queue as a single
    // track; segments are delimited by default-constructed Point<float> markers.
    out.push_back (Point<float>());
    for (std::vector< std::vector< Point<float> > >::const_iterator i = cropped_tracks.begin(); i != cropped_tracks.end(); ++i) {
      for (std::vector< Point<float> >::const_iterator p = i->begin(); p != i->end(); ++p)
        out.push_back (*p);
      out.push_back (Point<float>());
    }
    // NOTE(review): this appends a second trailing delimiter (the inner loop
    // already ends each segment with one); the newer Streamline<> overload
    // does not do this — confirm whether the double marker is intentional.
    out.push_back (Point<float>());
    return true;

  } else {
    // No mask: the streamline passed all criteria => downsample and emit
    // (unless -inverse)
    if (!inverse) {
      downsampler (tck);
      tck.swap (out);
    }
    return true;
  }
}