int main(void)
{
    int Mdim, Ndim, Pdim;   // A[N][P], B[P][M], C[N][M]
    int szA, szB, szC;      // number of elements in each matrix
    double start_time;      // starting time
    double run_time;        // timing data

    Ndim = ORDER;
    Pdim = ORDER;
    Mdim = ORDER;

    szA = Ndim * Pdim;
    szB = Pdim * Mdim;
    szC = Ndim * Mdim;

    std::vector<float> A(szA); // Host memory for Matrix A
    std::vector<float> B(szB); // Host memory for Matrix B
    std::vector<float> C(szC); // Host memory for Matrix C

    initmat(Mdim, Ndim, Pdim, A, B, C);

    printf("\n===== Sequential, matrix mult (dot prod), order %d on host CPU ======\n", ORDER);

    float tmp;
    zero_mat(Ndim, Mdim, C);
    start_time = wtime();
    for (int ii = 0; ii < Ndim; ii++) {
        for (int jj = 0; jj < Mdim; jj++) {
            tmp = 0.0f;
            for (int kk = 0; kk < Pdim; kk++) {
                /* C(ii,jj) = sum(over kk) A(ii,kk) * B(kk,jj) */
                // Row-major strides: A is N x P, B is P x M, C is N x M.
                tmp += A[ii*Pdim+kk] * B[kk*Mdim+jj];
            }
            C[ii*Mdim+jj] = tmp;
        }
    }
    run_time = wtime() - start_time;
    results(Mdim, Ndim, Pdim, C, run_time);
    return EXIT_SUCCESS;
}
std::vector<double> PyRateC_HOMPP_lik(
    std::vector<int> ind,
    std::vector<double> ts,
    std::vector<double> te,
    double qRate,
    std::vector<double> gammaRates,
    double cov_par,
    double ex_rate)
{
  double logDivisor = log((double)gammaRates.size());

  std::vector<double> results(fossils.size(), 0);

  for (size_t i = 0; i < ind.size(); ++i) {
    size_t iF = ind[i];
    const double tl = ts[iF] - te[iF];

    double nF = fossils[iF].size();
    nF = (double)(fossils[iF].back() == 0 ? nF - 1 : nF);

    if (gammaRates.size() > 1) {
      double spLogLik = 0.;
      for (size_t iG = 0; iG < gammaRates.size(); ++iG) {
        const double qGamma = gammaRates[iG] * qRate;
        const double qtl = qGamma * tl;
        const double logQ = log(qGamma);
        // lik1 = -qGamma*(br_length) + log(qGamma)*k - sum(log(np.arange(1,k+1))) - log(1-exp(-qGamma*(br_length)))
        double spGammaLogLik = -qtl + nF*logQ - logFactorialFossilCntPerSpecie[iF] - log(1. - exp(-qtl));
        if (iG == 0) spLogLik = spGammaLogLik;
        else         spLogLik = LOG_PLUS(spLogLik, spGammaLogLik);
      }
      results[iF] = spLogLik - logDivisor; // average over the gamma categories in log space
    } else { // no gamma rates
      const double qtl = qRate * tl;
      const double logQ = log(qRate);
      // -q*(br_length) + log(q)*k - sum(log(np.arange(1,k+1))) - log(1-exp(-q*(br_length)))
      results[iF] = -qtl + nF*logQ - logFactorialFossilCntPerSpecie[iF] - log(1. - exp(-qtl));
    }
  }

  return results;
}
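// The function above relies on a LOG_PLUS(a, b) helper that is not shown.
// A minimal sketch, assuming it computes log(exp(a) + exp(b)) in the usual
// numerically stable log-sum-exp form (hypothetical; the project's actual
// definition may differ) and is declared before its use above:
#define LOG_PLUS(a, b) \
    ((a) > (b) ? (a) + log(1.0 + exp((b) - (a))) \
               : (b) + log(1.0 + exp((a) - (b))))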
SparseCCS* SparseCCS::multiply_F(const SparseCCS& second) const
{
    real *resultColumn = new real[rows];
    Vector<real> results(rows * second.cols / 2);
    Vector<int> rowInds(rows * second.cols / 2);
    int i, k, l;

    SparseCCS *result = new SparseCCS(rows, second.cols);
    result->colptr[0] = 0;

    // Clear the dense accumulator used for one output column at a time.
    for (i = 0; i < rows; i++) {
        resultColumn[i] = 0;
    }

    for (i = 0; i < second.cols; i++) {
        // Accumulate column i of the product: for every nonzero B(r,i),
        // add B(r,i) * (column r of this matrix) into the dense column.
        for (k = second.colptr[i]; k < second.colptr[i + 1]; k++) {
            for (l = colptr[second.rowind[k]]; l < colptr[second.rowind[k] + 1]; l++) {
                resultColumn[rowind[l]] += second.vals[k] * vals[l];
            }
        }
        // Compress the dense column back into sparse form and reset it.
        for (k = 0; k < rows; k++) {
            if (resultColumn[k] != 0) {
                results.add(resultColumn[k]);
                rowInds.add(k);
                resultColumn[k] = 0;
            }
        }
        result->colptr[i + 1] = results.size();
    }

    delete[] resultColumn;

    result->setNNZ(results.size());
    for (i = 0; i < results.size(); i++) {
        result->vals[i] = results[i];
        result->rowind[i] = rowInds[i];
    }
    return result;
}
vector<FinderPoint> Finder::orderBestPatterns()
{
    if (possibleFinderCenters.size() < 3) {
        printf("Can't detect finder pattern\n");
        exit(1);
    }

    float abDistance = distance(possibleFinderCenters[0], possibleFinderCenters[1]);
    float bcDistance = distance(possibleFinderCenters[1], possibleFinderCenters[2]);
    float acDistance = distance(possibleFinderCenters[0], possibleFinderCenters[2]);

    FinderPoint topLeft;
    FinderPoint topRight;
    FinderPoint bottomLeft;

    // Assume the one closest to the other two is the top left;
    // topRight and bottomLeft are just first guesses below.
    if (bcDistance >= abDistance && bcDistance >= acDistance) {
        topLeft = possibleFinderCenters[0];
        topRight = possibleFinderCenters[1];
        bottomLeft = possibleFinderCenters[2];
    } else if (acDistance >= bcDistance && acDistance >= abDistance) {
        topLeft = possibleFinderCenters[1];
        topRight = possibleFinderCenters[0];
        bottomLeft = possibleFinderCenters[2];
    } else {
        topLeft = possibleFinderCenters[2];
        topRight = possibleFinderCenters[0];
        bottomLeft = possibleFinderCenters[1];
    }

    // Use the cross product to figure out which of the other two is the
    // bottom left pattern. The vector "top-left -> bottom-left" x
    // "top-left -> top-right" should yield a vector with positive z component.
    if ((bottomLeft.getY() - topLeft.getY()) * (topRight.getX() - topLeft.getX()) <
        (bottomLeft.getX() - topLeft.getX()) * (topRight.getY() - topLeft.getY())) {
        FinderPoint temp = topRight;
        topRight = bottomLeft;
        bottomLeft = temp;
    }

    vector<FinderPoint> results(3);
    results[0] = bottomLeft;
    results[1] = topLeft;
    results[2] = topRight;
    return results;
}
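// orderBestPatterns() calls a distance() helper that is not shown. A minimal
// sketch, assuming plain Euclidean distance between two finder centers
// (hypothetical; the original helper may be defined elsewhere in Finder):
static float distance(const FinderPoint& a, const FinderPoint& b)
{
    float dx = a.getX() - b.getX();
    float dy = a.getY() - b.getY();
    return std::sqrt(dx * dx + dy * dy);
}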
flut::optimizer::vec_double optimizer::evaluate( const vector< vec_double >& pop )
{
    vector< double > results( pop.size(), 0.0 );

    try
    {
        vector< std::pair< std::future< double >, index_t > > threads;

        for ( index_t eval_idx = 0; eval_idx < pop.size(); ++eval_idx )
        {
            // first make sure enough threads are available
            while ( threads.size() >= max_threads() )
            {
                for ( auto it = threads.begin(); it != threads.end(); )
                {
                    if ( it->first.wait_for( std::chrono::milliseconds( 1 ) ) == std::future_status::ready )
                    {
                        // a thread is finished; store its result and make room for a new thread
                        results[ it->second ] = it->first.get();
                        it = threads.erase( it );
                    }
                    else ++it;
                }
            }

            // add new thread
            threads.push_back( std::make_pair( std::async( std::launch::async, func_, pop[ eval_idx ] ), eval_idx ) );
        }

        // wait for remaining threads
        for ( auto& f : threads )
            results[ f.second ] = f.first.get();
    }
    catch ( std::exception& e )
    {
        log::critical( "Error during multi-threaded evaluation: ", e.what() );
    }
    catch ( ... )
    {
        log::critical( "Unknown error during multi-threaded evaluation" );
    }

    return results;
}
Json::Value &GetTaskCommand::run(MushiSession &sess, Json::Value &command, Json::Value &ret,
                                 QScriptEngine &engine, MushiDB &db)
{
    if (command["command"].asString() == "getTask") {
        if (command.get("taskID", "") == "") {
            ret["status"] = "error";
            ret["command"] = "getTask";
            ret["message"] = "Must have an ID to get a task";
            throw ret;
        }

        Json::Value results(Json::arrayValue);
        std::ostringstream query;
        MushiDBResult *r;

        query << "SELECT t.id, t.title, t.description, t.percentComplete, t.estimate, t.createDate, t.originalEstimate"
              << " , t.reporterID, r.firstName as reporter_firstName, r.lastName as reporter_lastName, r.email as reporter_email "
              << " ,t.ownerId as ownerID, t.parentTaskID,t.dueDate, o.firstName as owner_firstName, o.lastName as owner_lastName, o.email as owner_email "
              << " ,s.name as status_name, s.isOpen as status_isOpen, s.id as status_id"
              << " ,ty.id as type_id,ty.name as type_name, ty.description as type_description"
              << " ,p.id as priority_id, p.name as priority_name, p.description as priority_description"
              << " FROM task t"
              << " LEFT JOIN user r on r.id = t.reporterID"
              << " LEFT JOIN user o on o.id = t.ownerID"
              << " LEFT JOIN status s on s.id = t.statusID"
              << " LEFT JOIN type ty on t.typeID = ty.id"
              << " LEFT JOIN Priority p on p.id = t.priorityID";
        query << " WHERE t.id = " << db.escapeQuotes(command.get("taskID", "").asCString()).toStdString().c_str();

        r = db.query(query.str());
        results = r->getNestedJson();

        ret["status"] = "success";
        ret["results"] = results;
    }
    return ret;
}
template <typename SrcType, typename BinaryPredicate>
SrcType min_element_image(const cv::Mat_<SrcType>& src, BinaryPredicate comp)
{
#ifdef _OPENMP
    const int size = src.rows;
    const int max_blocks = omp_get_max_threads();
    const int n_blocks = (size / max_blocks) > 0 ? max_blocks : size;
    std::vector<SrcType> results(n_blocks);

    #pragma omp parallel num_threads(n_blocks)
    {
        // Each thread scans an interleaved strip of rows.
        int thread_id = omp_get_thread_num();
        SrcType thread_result = src(thread_id, 0);
        for (int y = thread_id; y < src.rows; y += n_blocks) {
            const SrcType* src_x = src[y];
            for (int x = 0; x < src.cols; ++x) {
                if (comp(src_x[x], thread_result))
                    thread_result = src_x[x];
            }
        }
        results[thread_id] = thread_result;
    }

    // Reduce the per-thread results (no-op when n_blocks == 1).
    for (int i = 1; i < n_blocks; ++i) {
        if (comp(results[i], results[0]))
            results[0] = results[i];
    }
    return results[0];
#else
    SrcType result = src(0, 0);
    for (int y = 0; y < src.rows; ++y) {
        const SrcType* src_x = src[y];
        for (int x = 0; x < src.cols; ++x) {
            if (comp(src_x[x], result))
                result = src_x[x];
        }
    }
    return result;
#endif
}
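// A short usage sketch for min_element_image above (the 3x3 values are made
// up for illustration; std::less selects the smallest pixel, and any other
// strict ordering predicate would work the same way):
static float min_element_image_demo()
{
    cv::Mat_<float> img(3, 3);
    img << 4.f, 2.f, 7.f,
           1.f, 9.f, 5.f,
           8.f, 3.f, 6.f;
    return min_element_image(img, std::less<float>()); // returns 1.f
}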
template <typename Iterator, typename T>
T parallel_accumulate(Iterator first, Iterator last, T init)
{
    unsigned long const length = std::distance(first, last);
    if (!length) {
        return init;
    }

    // Decide how many threads to use: enough for blocks of at least
    // min_per_thread elements, capped by the hardware concurrency.
    unsigned long const min_per_thread = 25;
    unsigned long const max_threads =
        (length + min_per_thread - 1) / min_per_thread;
    unsigned long const hardware_threads = std::thread::hardware_concurrency();
    unsigned long const num_threads =
        std::min(hardware_threads != 0 ? hardware_threads : 2, max_threads);
    unsigned long const block_size = length / num_threads;

    std::vector<T> results(num_threads);
    std::vector<std::thread> threads(num_threads - 1);

    Iterator block_start = first;
    for (unsigned long i = 0; i < (num_threads - 1); ++i) {
        Iterator block_end = block_start;
        std::advance(block_end, block_size);
        threads[i] = std::thread(
            accumulate_block<Iterator, T>(),
            block_start, block_end, std::ref(results[i]));
        block_start = block_end;
    }
    // Accumulate the final (possibly longer) block in the calling thread.
    accumulate_block<Iterator, T>()(block_start, last, results[num_threads - 1]);

    std::for_each(threads.begin(), threads.end(),
                  std::mem_fn(&std::thread::join));

    return std::accumulate(results.begin(), results.end(), init);
}
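// parallel_accumulate assumes an accumulate_block functor that is not shown.
// A minimal sketch of what it would need to look like, declared before
// parallel_accumulate (the common formulation of this example):
template <typename Iterator, typename T>
struct accumulate_block
{
    void operator()(Iterator first, Iterator last, T& result)
    {
        result = std::accumulate(first, last, result);
    }
};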
/////////////////////////////////////////////////////////////////////////////
// Test functions
/////////////////////////////////////////////////////////////////////////////
static void DFTest()
{
    CPLog1D problem;
    CResults results(problem.GetDimensions());
    CPFQuadratic pf(problem.GetDimensions());
    CRegression reg(results, pf);
    CSPUniform sp(problem.GetDimensions(), -1, 1);

    // sp.Seed(0);
    // problem.Seed(1);

    for (int i = 100; --i >= 0;) {
        const double *v = sp.NextSample(results.GetSamples());
        COutcome r = problem.GetOutcome(v);
        results.AddSample(v, r);
    }
    results.Refresh();

    // CDFRatingLCB df(reg, 1.96);
    CDFVarianceAlpha df(reg);
    // CDFVarianceDelta df(reg);

    const int Points = 20;
    double v[1];
    for (int i = 0; i <= Points; i++) {
        double x = -1.0 + 2.0 * i / Points;
        v[0] = x;
        double y = df.GetOutput(v);
        df.ComputeGradient();
        double g = df.GetGradient()[0];
        df.CDFVariance::ComputeGradient();
        double h = df.GetGradient()[0];
        std::cout << std::setw(13) << x << std::setw(13) << y
                  << std::setw(13) << g << std::setw(13) << h;
        std::cout << '\n';
    }
}
template <typename T>
double MgpuBenchmark(searchEngine_t engine, int count, CuDeviceMem* values,
                     searchType_t type, CuDeviceMem* btree, int numIterations,
                     int numQueries, CuDeviceMem* keys, CuDeviceMem* indices,
                     const T* valuesHost, const T* keysHost)
{
    CuEventTimer timer;
    timer.Start();

    int size = (SEARCH_TYPE_INT32 == type) ? 4 : 8; // key size in bytes
    int offset = 0;
    for (int it(0); it < numIterations; ++it) {
        // Slide the query window through the key buffer, wrapping when needed.
        offset += RoundUp(numQueries, 32);
        if (offset + numQueries > MaxQuerySize) offset = 0;

        searchStatus_t status = searchKeys(engine, count, type, values->Handle(),
                                           SEARCH_ALGO_LOWER_BOUND,
                                           keys->Handle() + offset * size,
                                           numQueries, btree->Handle(),
                                           indices->Handle());
        if (SEARCH_STATUS_SUCCESS != status) {
            printf("FAIL!\n");
            exit(1);
        }
    }
    double elapsed = timer.Stop();
    double throughput = (double)numQueries * numIterations / elapsed;

    // Verify the results for the last set of queries run.
    std::vector<uint> results(numQueries);
    indices->ToHost(results);

    for (int i(0); i < numQueries; ++i) {
        const T* lower = std::lower_bound(valuesHost, valuesHost + count,
                                          keysHost[offset + i]);
        if ((lower - valuesHost) != results[i]) {
            printf("Failure in MGPU Search.\n");
            exit(1);
        }
    }

    return throughput;
}
vector<DataPoint> SpeechKMeans::WeightedKMeans(vector<DataPoint> &points,
                                               vector<double> &weights, int k)
{
    // NOTE: weights is currently unused; the KMlocal run below is unweighted.
    KMterm term(100, 0, 0, 0,   // run for 100 stages
                0.10, 0.10, 3,  // other typical parameter values
                0.50, 10, 0.95);

    int dim = problems_.num_features();  // dimension
    int nPts = points.size();            // number of data points
    KMdata dataPts(dim, nPts);           // allocate data storage
    for (int p = 0; p < nPts; ++p) {
        dataPts[p] = new double[dim];
        for (int i = 0; i < dim; ++i) {
            dataPts[p][i] = points[p][i];
        }
    }
    // kmUniformPts(dataPts.getPts(), nPts, dim); // generate random points
    dataPts.buildKcTree();               // build filtering structure

    KMfilterCenters ctrs(k, dataPts);    // allocate centers

    // run the algorithm
    // KMlocalLloyds kmAlg(ctrs, term);    // repeated Lloyd's
    // KMlocalSwap kmAlg(ctrs, term);      // Swap heuristic
    // KMlocalEZ_Hybrid kmAlg(ctrs, term); // EZ-Hybrid heuristic
    KMlocalHybrid kmAlg(ctrs, term);       // Hybrid heuristic
    ctrs = kmAlg.execute();                // execute

    // print number of stages and average distortion
    cout << "Number of stages: " << kmAlg.getTotalStages() << "\n";
    cout << "Average distortion: " << ctrs.getDist(false) / nPts << "\n";
    ctrs.print(); // print final centers

    cerr << "copying points" << endl;
    vector<DataPoint> results(k);
    for (int j = 0; j < k; ++j) {
        results[j].resize(dim);
        for (int i = 0; i < dim; ++i) {
            results[j][i] = ctrs.getCtrPts()[j][i];
        }
    }
    cerr << "done copying points" << endl;
    return results;
}
int main(void)
{
    int N;      // A[N][N], B[N][N], C[N][N]
    int sz;     // number of elements in each matrix
    float tmp;

    N = ORDER;
    sz = N * N;

    std::vector<float> A(sz); // Matrix A
    std::vector<float> B(sz); // Matrix B
    std::vector<float> C(sz); // Matrix C

    initmat(N, N, N, A, B, C);

    printf("\n===== Sequential, matrix mult (dot prod), order %d on CPU ======\n", ORDER);

    zero_mat(N, N, C);
    util::Timer timer;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            tmp = 0.0f;
            for (int k = 0; k < N; k++) {
                tmp += A[i*N+k] * B[k*N+j];
            }
            C[i*N+j] = tmp;
        }
    }
    double rtime = static_cast<double>(timer.getTimeMilliseconds()) / 1000.0;
    results(N, N, N, C, rtime);
}
bool CSSParserImpl::parseDeclarationList(MutableStylePropertySet* declaration,
    const String& string, const CSSParserContext& context)
{
    CSSParserImpl parser(context);
    StyleRule::Type ruleType = StyleRule::Style;
    if (declaration->cssParserMode() == CSSViewportRuleMode)
        ruleType = StyleRule::Viewport;
    CSSTokenizer::Scope scope(string);
    parser.consumeDeclarationList(scope.tokenRange(), ruleType);
    if (parser.m_parsedProperties.isEmpty())
        return false;

    BitArray<numCSSProperties> seenProperties;
    size_t unusedEntries = parser.m_parsedProperties.size();
    WillBeHeapVector<CSSProperty, 256> results(unusedEntries);
    filterProperties(true, parser.m_parsedProperties, results, unusedEntries, seenProperties);
    filterProperties(false, parser.m_parsedProperties, results, unusedEntries, seenProperties);
    if (unusedEntries)
        results.remove(0, unusedEntries);
    return declaration->addParsedProperties(results);
}
void QGeoCodeReplyNokia::networkFinished()
{
    if (!m_reply)
        return;

    if (m_reply->error() != QNetworkReply::NoError)
        return;

    QGeoCodeXmlParser *parser = new QGeoCodeXmlParser;
    parser->setBounds(viewport());
    connect(parser, SIGNAL(results(QList<QGeoLocation>)),
            this, SLOT(appendResults(QList<QGeoLocation>)));
    connect(parser, SIGNAL(error(QString)),
            this, SLOT(parseError(QString)));

    m_parsing = true;
    parser->parse(m_reply->readAll());

    m_reply->deleteLater();
    m_reply = 0;
}
void ProjectBase::removeResult(OMCase* result, bool saveProject)
{
    int num = results()->items.indexOf(result);
    if (num > -1) {
        // result to be removed
        emit beforeRemoveResult(dynamic_cast<Result*>(result));

        // remove folder and data
        QDir folder(result->saveFolder());
        if (folder != QDir(resultsFolder()))
            LowTools::removeDir(folder.absolutePath());

        _results->removeRow(num);

        if (saveProject)
            save(false);
    }
}
vector<int> topKFrequent(vector<int>& nums, int k)
{
    // Count occurrences of each value.
    unordered_map<int, int> mp;
    for (int num : nums) {
        mp[num]++;
    }

    // Collect (frequency, value) pairs and sort them by frequency.
    vector<pair<int, int>> freq;
    for (auto e : mp) {
        freq.push_back(make_pair(e.second, e.first));
    }
    sort(freq.begin(), freq.end(), compfunc);

    // Keep the values of the k most frequent pairs.
    vector<int> results(k, 0);
    for (int i = 0; i < k; i++) {
        results[i] = freq[i].second;
    }
    return results;
}
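// topKFrequent relies on a compfunc comparator that is not shown. For the
// first k entries of freq to be the most frequent, it must order pairs by
// descending frequency; a minimal sketch (hypothetical), declared before
// topKFrequent:
static bool compfunc(const pair<int, int>& a, const pair<int, int>& b)
{
    return a.first > b.first;
}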
int main(int argc, char **argv)
{
    for (i = 0; i < N; i++) {
        x[i] = i;
        y[i] = i;
        theta[i] = 6 * random();
    }

    // Record the starting time
    initialTime = getTime();

    for (ii = 0; ii < N; ++ii) {
        sin6[ii] = sin(6 * theta[ii]);
        cos6[ii] = cos(6 * theta[ii]);
    }

    for (i = 0; i < N; ++i)
        for (j = i + 1; j < N; ++j) {
            Dx = x[i] - x[j];
            Dy = y[i] - y[j];
            r = sqrt(Dx*Dx + Dy*Dy); // distance truncated to an integer bin
            g[r] += cos6[i]*cos6[j] + sin6[i]*sin6[j];
            ++count[r];
        }

    for (r = 0; r < MAX_R; r++)
        g[r] = g[r] / count[r];

    // Measure how long the computation took
    finalTime = getTime();

    // Print the results
    results();

    return 0;
}
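/* The snippet above calls getTime() and results(), which are defined
 * elsewhere. A minimal sketch of a getTime() helper, assuming it returns
 * elapsed CPU seconds (hypothetical; the original may use a wall clock): */
#include <time.h>

double getTime(void)
{
    return (double)clock() / CLOCKS_PER_SEC;
}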
template <typename I, typename F>
F for_each(I first, I last, F f)
{
    auto length = std::distance(first, last);
    typedef decltype(length) dist_type;

    if (length != 0) {
        /**
         * First work out how many blocks to divide the sequence into.
         */
        dist_type min_per_thread = 25;
        dist_type max_threads = (length + min_per_thread - 1) / min_per_thread;
        dist_type hardware_threads = std::thread::hardware_concurrency();
        dist_type num_threads =
            std::min(hardware_threads != 0 ? hardware_threads : 2, max_threads);
        dist_type block_size = length / num_threads;

        /**
         * Subdivide the for_each algorithm into `num_threads` tasks.
         */
        std::vector<std::future<F> > results(num_threads - 1);
        auto block_begin = first;
        for (auto &result : results) {
            auto block_end = block_begin;
            std::advance(block_end, block_size);
            result = std::async(std::for_each<I, F>,
                                block_begin, block_end, f);
            block_begin = block_end;
        }

        /**
         * Finally, run the last task in the current thread.
         */
        std::for_each(block_begin, last, f);

        /**
         * Don't exit until all tasks are complete.
         */
        for (auto &result : results) {
            result.get();
        }
    }

    return f;
}
template<class T>
void swakExpressionFunctionObject::writeData(CommonValueExpressionDriver &driver)
{
    Field<T> result = driver.getResult<T>();
    Field<T> results(accumulations_.size());

    forAll(accumulations_, i) {
        const word &aName = accumulations_[i];
        T val = pTraits<T>::zero;

        if (aName == "min") {
            val = gMin(result);
        } else if (aName == "max") {
            val = gMax(result);
        } else if (aName == "sum") {
            val = gSum(result);
        } else if (aName == "average") {
            val = gAverage(result);
        } else {
            WarningIn("swakExpressionFunctionObject::writeData")
                << "Unknown accumulation type " << aName
                << ". Currently only 'min', 'max', 'sum' and 'average' are supported"
                << endl;
        }

        results[i] = val;
        if (verbose()) {
            Info << " " << aName << "=" << val;
        }
    }

    if (Pstream::master()) {
        unsigned int w = IOstream::defaultPrecision() + 7;
        OFstream& o = *filePtrs_[name()];

        o << setw(w) << time().value();
        forAll(results, i) {
            o << setw(w) << results[i];
        }
        o << nl;
    }
}
bool WignerDStrategy::execute(ParameterList& paras,
                              std::shared_ptr<AbsParameter>& out)
{
#ifdef DEBUG
    if (checkType != out->type()) {
        throw(WrongParType(std::string("Output Type ") + ParNames[out->type()]
                           + std::string(" conflicts with expected type ")
                           + ParNames[checkType] + std::string(" of ") + name
                           + " Wigner strat"));
        return false;
    }
#endif

    double _inSpin   = paras.GetDoubleParameter(0)->GetValue();
    double _outSpin1 = paras.GetDoubleParameter(1)->GetValue();
    double _outSpin2 = paras.GetDoubleParameter(2)->GetValue();

    ComPWA::Physics::DPKinematics::DalitzKinematics* kin =
        dynamic_cast<ComPWA::Physics::DPKinematics::DalitzKinematics*>(
            Kinematics::instance());

    std::shared_ptr<MultiDouble> _angle = paras.GetMultiDouble(0);
    std::vector<double> results(_angle->GetNValues(), 0.);
    for (unsigned int ele = 0; ele < _angle->GetNValues(); ele++) {
        try {
            results.at(ele) = AmpWigner2::dynamicalFunction(
                _inSpin, _outSpin1, _outSpin2, _angle->GetValue(ele));
        } catch (std::exception &ex) {
            BOOST_LOG_TRIVIAL(error) << "WignerDStrategy::execute() | "
                                     << ex.what();
            throw std::runtime_error("WignerDStrategy::execute() | "
                                     "Evaluation of dynamical function failed!");
        }
    } // end element loop
    out = std::shared_ptr<AbsParameter>(new MultiDouble(out->GetName(), results));

    return true;
}
template <class FeatureType, class DataSet, class LabelType, class ExampleIndex, class NodeType>
void pcl::FernEvaluator<FeatureType, DataSet, LabelType, ExampleIndex, NodeType>::evaluate (
    pcl::Fern<FeatureType, NodeType> & fern,
    pcl::FeatureHandler<FeatureType, DataSet, ExampleIndex> & feature_handler,
    pcl::StatsEstimator<LabelType, NodeType, DataSet, ExampleIndex> & stats_estimator,
    DataSet & data_set,
    std::vector<ExampleIndex> & examples,
    std::vector<LabelType> & label_data)
{
  const size_t num_of_examples = examples.size ();
  const size_t num_of_branches = stats_estimator.getNumOfBranches ();
  const size_t num_of_features = fern.getNumOfFeatures ();

  label_data.resize (num_of_examples);

  std::vector<std::vector<float> > results (num_of_features);
  std::vector<std::vector<unsigned char> > flags (num_of_features);
  std::vector<std::vector<unsigned char> > branch_indices (num_of_features);

  for (size_t feature_index = 0; feature_index < num_of_features; ++feature_index)
  {
    results[feature_index].reserve (num_of_examples);
    flags[feature_index].reserve (num_of_examples);
    branch_indices[feature_index].reserve (num_of_examples);

    feature_handler.evaluateFeature (fern.accessFeature (feature_index),
                                     data_set, examples,
                                     results[feature_index],
                                     flags[feature_index]);
    stats_estimator.computeBranchIndices (results[feature_index],
                                          flags[feature_index],
                                          fern.accessThreshold (feature_index),
                                          branch_indices[feature_index]);
  }

  // Combine the per-feature branch indices into a single fern node index.
  for (size_t example_index = 0; example_index < num_of_examples; ++example_index)
  {
    size_t node_index = 0;
    for (size_t feature_index = 0; feature_index < num_of_features; ++feature_index)
    {
      node_index *= num_of_branches;
      node_index += branch_indices[feature_index][example_index];
    }
    label_data[example_index] = stats_estimator.getLabelOfNode (fern[node_index]);
  }
}
void World::step(float h)
{
#ifdef SMP
    std::vector<std::future<void>> results(_cores);

    // Step all links, splitting the range evenly across _cores async tasks.
    unsigned linksCount = _links.size();
    for (unsigned i = 0; i < _cores; i++) {
        unsigned i_start = i * linksCount / _cores,
                 i_stop  = (i + 1) * linksCount / _cores;
        results[i] = std::async(std::launch::async, [this, &h, i_start, i_stop]() {
            for (unsigned j = i_start; j < i_stop; j++) {
                _links[j]->step(h);
            }
        });
    }
    for (unsigned i = 0; i < _cores; i++) {
        results[i].wait();
    }

    // Then step all bodies the same way.
    unsigned bodiesCount = _bodies.size();
    for (unsigned i = 0; i < _cores; i++) {
        unsigned i_start = i * bodiesCount / _cores,
                 i_stop  = (i + 1) * bodiesCount / _cores;
        results[i] = std::async(std::launch::async, [this, &h, i_start, i_stop]() {
            for (unsigned j = i_start; j < i_stop; j++)
                _bodies[j]->step(h);
        });
    }
    for (unsigned i = 0; i < _cores; i++) {
        results[i].wait();
    }
#else
    for (unsigned i = 0; i < _links.size(); i++) {
        _links[i]->step(h);
    }
    for (unsigned i = 0; i < _bodies.size(); i++) {
        _bodies[i]->step(h);
    }
#endif
}
//! Reads a set of registration results
RegistrationResults read_registration_results(const String &filename)
{
    std::ifstream in(filename.c_str(), std::ios::in | std::ios::binary);
    unsigned int n_records = 0;
    String line;
    // Skip comment lines; the first non-comment line holds the record count.
    while (getline(in, line)) {
        if (!line.empty() && line[0] == '#') {
            continue;
        } else {
            n_records = std::atoi(line.c_str());
            break;
        }
    }
    RegistrationResults results(n_records);
    for (unsigned int i = 0; i < n_records; ++i) {
        getline(in, line);
        results[i].read(line);
    }
    in.close();
    return results;
}
// static
void JCFloaterAreaSearch::onCommitLine(LLLineEditor* line, void* user_data)
{
    std::string name = line->getName();
    std::string text = line->getText();
    line->setText(text);

    if (name == "Name query chunk") {
        sSearchedName = text;
        sSearchingName = (text.length() > 3);
    } else if (name == "Description query chunk") {
        sSearchedDesc = text;
        sSearchingDesc = (text.length() > 3);
    } else if (name == "Owner query chunk") {
        sSearchedOwner = text;
        sSearchingOwner = (text.length() > 3);
    } else if (name == "Group query chunk") {
        sSearchedGroup = text;
        sSearchingGroup = (text.length() > 3);
    }

    if (text.length() > 3) {
        checkRegion();
        results();
    }
}
MojErr MojDbServiceHandler::handleQuotaStats(MojServiceMessage* msg, MojObject& payload, MojDbReq& req)
{
    MojAssert(msg);
    MojLogTrace(s_log);

    MojObject results(MojObject::TypeObject);
    MojErr err = m_db.quotaStats(results, req);
    MojErrCheck(err);

    MojObjectVisitor& writer = msg->writer();
    err = writer.beginObject();
    MojErrCheck(err);
    err = writer.boolProp(MojServiceMessage::ReturnValueKey, true);
    MojErrCheck(err);
    err = writer.objectProp(MojDbServiceDefs::ResultsKey, results);
    MojErrCheck(err);
    err = writer.endObject();
    MojErrCheck(err);

    return MojErrNone;
}
void SquaredNormExpression::evaluateImpl(
    const MatrixID<const Interval>& parameterID,
    const MatrixID<Interval>& resultID,
    ExpressionCompiler<Interval>& expressionCompiler) const
{
    expressionCompiler.compute(
        expressionCompiler.evaluate(operand(), parameterID),
        resultID,
        [] (ConstIntervalMatrixViewXd operandValues, IntervalMatrixViewXd results) {
            // Each result column is the sum of squares of the operand column.
            for (int columnIndex = 0; columnIndex < results.numColumns(); ++columnIndex) {
                results(0, columnIndex) = operandValues.column(columnIndex).fold(
                    Interval(0.0),
                    [] (Interval result, Interval value) {
                        return result + value * value;
                    }
                );
            }
        }
    );
}
std::vector< CompletionData > TranslationUnit::CandidatesForLocation(
    const std::string &filename,
    int line,
    int column,
    const std::vector< UnsavedFile > &unsaved_files)
{
    unique_lock< mutex > lock( clang_access_mutex_ );

    if ( !clang_translation_unit_ ) {
        return std::vector< CompletionData >();
    }

    std::vector< CXUnsavedFile > cxunsaved_files = ToCXUnsavedFiles( unsaved_files );
    const CXUnsavedFile *unsaved = cxunsaved_files.empty()
                                   ? nullptr
                                   : &cxunsaved_files[ 0 ];

    // codeCompleteAt reparses the TU if the underlying source file has changed on
    // disk since the last time the TU was updated and there are no unsaved files.
    // If there are unsaved files, then codeCompleteAt will parse the in-memory
    // file contents we are giving it. In short, it is NEVER a good idea to call
    // clang_reparseTranslationUnit right before a call to clang_codeCompleteAt.
    // This only makes clang reparse the whole file TWICE, which has a huge impact
    // on latency. At the time of writing, it seems that most users of libclang
    // in the open-source world don't realize this (I checked). Some don't even
    // call reparse*, but parse* which is even less efficient.
    CodeCompleteResultsWrap results(
        clang_codeCompleteAt( clang_translation_unit_,
                              filename.c_str(),
                              line,
                              column,
                              const_cast<CXUnsavedFile *>( unsaved ),
                              cxunsaved_files.size(),
                              CompletionOptions() ),
        clang_disposeCodeCompleteResults );

    std::vector< CompletionData > candidates = ToCompletionDataVector( results.get() );
    return candidates;
}
/**
 * This is the main routine of "MonoCrosser", and implements a monotonic strategy
 * on multiple curves. Finds crossings between two sets of paths, yielding a
 * CrossingSet. [0, a.size()) of the return corresponds to the sorted crossings
 * of a with paths of b. The rest of the return, [a.size(), a.size() + b.size()),
 * corresponds to the sorted crossings of b with paths of a.
 *
 * This function does two sweeps, one on the bounds of each path, and after that
 * cull, one on the curves within. This leads to a certain amount of code
 * complexity; however, most of that is factored into the above functions.
 */
CrossingSet MonoCrosser::crossings(std::vector<Path> const &a, std::vector<Path> const &b)
{
    if (b.empty()) return CrossingSet(a.size(), Crossings());
    CrossingSet results(a.size() + b.size(), Crossings());
    if (a.empty()) return results;

    std::vector<std::vector<double> > splits_a = paths_mono_splits(a),
                                      splits_b = paths_mono_splits(b);
    std::vector<std::vector<Rect> > bounds_a = split_bounds(a, splits_a),
                                    bounds_b = split_bounds(b, splits_b);

    std::vector<Rect> bounds_a_union, bounds_b_union;
    for (unsigned i = 0; i < bounds_a.size(); i++)
        bounds_a_union.push_back(union_list(bounds_a[i]));
    for (unsigned i = 0; i < bounds_b.size(); i++)
        bounds_b_union.push_back(union_list(bounds_b[i]));

    std::vector<std::vector<unsigned> > cull = sweep_bounds(bounds_a_union, bounds_b_union);
    Crossings n;
    for (unsigned i = 0; i < cull.size(); i++) {
        for (unsigned jx = 0; jx < cull[i].size(); jx++) {
            unsigned j = cull[i][jx];
            unsigned jc = j + a.size();
            Crossings res;

            // Sweep of the monotonic portions
            std::vector<std::vector<unsigned> > cull2 = sweep_bounds(bounds_a[i], bounds_b[j]);
            for (unsigned k = 0; k < cull2.size(); k++) {
                for (unsigned lx = 0; lx < cull2[k].size(); lx++) {
                    unsigned l = cull2[k][lx];
                    mono_pair(a[i], splits_a[i][k-1], splits_a[i][k],
                              b[j], splits_b[j][l-1], splits_b[j][l],
                              res, .1);
                }
            }

            for (unsigned k = 0; k < res.size(); k++) {
                res[k].a = i;
                res[k].b = jc;
            }

            // Merge into both halves of the result: the a-side entry and the
            // corresponding b-side entry.
            merge_crossings(results[i], res, i);
            merge_crossings(results[jc], res, jc);
        }
    }

    return results;
}
int main(int argc, char **argv)
{
    for (i = 0; i < N; i++) {
        data[i].x = i;
        data[i].y = i;
        theta[i] = 6 * random();
    }

    // Record the starting time
    initialTime = getTime();

    for (ii = 0; ii < N; ii++) {
        data[ii].cos6 = cos(6 * theta[ii]);
        data[ii].sin6 = sin(6 * theta[ii]);
    }

    for (i = 0; i < N; ++i)
        for (j = i + 1; j < N; ++j) {
            Dx = data[i].x - data[j].x;
            Dy = data[i].y - data[j].y;
            r = sqrt(Dx*Dx + Dy*Dy); // distance truncated to an integer bin
            accum[r].g += data[i].cos6 * data[j].cos6 + data[i].sin6 * data[j].sin6;
            ++accum[r].count;
        }

    for (r = 0; r < MAX_R; r++)
        accum[r].g = accum[r].g / accum[r].count;

    // Measure how long the computation took
    finalTime = getTime();

    // Print the results
    results();

    return 0;
}
const CMTSumCheckResults CMTSumCheckVerifier::run()
{
    MPZVector expected(trueSums);
    MPZVector poly(conf.batchSize() * polySize);

    CMTSumCheckResults results(numRounds(), conf.batchSize());
    results.success = true;

    for (int i = 0; i < numRounds(); i++) {
        results.success = results.success &&
                          doRound(expected, results, i, poly, expected);
    }
    results.success = results.success && doFinalCheck(results, expected);

    return results;
}