bool FileCSV::read(TimeSeries &ts, int valuesColumn, int timeColumn)
{
    if (!isOpen() ||
        valuesColumn < 0 || valuesColumn >= (int)m_header.size() ||
        timeColumn   < 0 || timeColumn   >= (int)m_header.size())
        return false;

    seekRow(0);
    Row row;
    ReadResult rr;
    FL::TimeSeries::Data values, time;

    // Collect the two requested columns row by row
    while ((rr = readRow(row)) == rrOK)
    {
        values.push_back(row[valuesColumn]);
        time.push_back(row[timeColumn]);
    }

    ts.setData(values, time);
    ts.header()[0] = m_header[valuesColumn];
    ts.header()[1] = m_header[timeColumn];

    // Success only if the loop stopped because the file was exhausted
    return rr == rrEmpty;
}
// Iterate from an initial state
void DynSysModel::Iterate( NTuple& initial_state, size_t length, TimeSeries& result )
{
    // Make sure the result is empty
    result.clear();

    // Save the initial state as the first time series entry, so the result
    // holds `length` states in total including the initial one
    result.push_back( initial_state );
    mState = initial_state;

    NTuple next_state;
    PolyModelIter iter;
    size_t i, k;
    for( i = length - 1; i > 0; --i )
    {
        k = 1;
        // For each polynomial
        next_state.Reset();
        iter = mModel.begin();
        while( iter != mModel.end() )
        {
            // Evaluate the k'th polynomial at the current state
            // to produce a new value for the k'th variable
            next_state.Assign( k, mModel[k-1].Evaluate( mState ) );
            // next polynomial
            ++iter;
            ++k;
        }
        // Update the current state to be the newly computed state
        mState = next_state;
        result.push_back( next_state );
    }
}
TEST(StatisticsTest, Statistics)
{
  // Create a distribution of 11 values from -5 to 5.
  TimeSeries<double> timeseries;

  Time now = Clock::now();
  for (int i = -5; i <= 5; ++i) {
    now += Seconds(1);
    timeseries.set(i, now);
  }

  Option<Statistics<double>> statistics = Statistics<double>::from(timeseries);

  EXPECT_SOME(statistics);

  EXPECT_EQ(11u, statistics->count);

  EXPECT_DOUBLE_EQ(-5.0, statistics->min);
  EXPECT_DOUBLE_EQ(5.0, statistics->max);

  EXPECT_DOUBLE_EQ(0.0, statistics->p50);
  EXPECT_DOUBLE_EQ(4.0, statistics->p90);
  EXPECT_DOUBLE_EQ(4.5, statistics->p95);
  EXPECT_DOUBLE_EQ(4.9, statistics->p99);
  EXPECT_DOUBLE_EQ(4.99, statistics->p999);
  EXPECT_DOUBLE_EQ(4.999, statistics->p9999);
}
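// The expected percentiles in the test above are consistent with linear
// interpolation on the fractional rank r = p * (N - 1) over the sorted
// sample. This is a hedged sketch of that convention, not the actual
// source of Statistics<T>::from:
#include <cmath>
#include <vector>

double percentileSketch(const std::vector<double>& sorted, double p)
{
    double r = p * (sorted.size() - 1);               // fractional rank
    size_t lo = static_cast<size_t>(std::floor(r));
    size_t hi = static_cast<size_t>(std::ceil(r));
    return sorted[lo] + (r - lo) * (sorted[hi] - sorted[lo]); // interpolate
}
// For the 11 values -5..5: p50 -> 0.0, p90 -> 4.0, p95 -> 4.5, p99 -> 4.9.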
void FloodPlot::timeseriesData(TimeSeries tsData)
{
  if (tsData.values().empty()) {
    return;
  }

  m_startDateTime = tsData.firstReportDateTime();
  m_endDateTime = tsData.firstReportDateTime() +
      Time(tsData.daysFromFirstReport(tsData.daysFromFirstReport().size() - 1));
  m_duration = (m_endDateTime - m_startDateTime).totalDays();
  m_xAxisMin = 0.0;
  m_xAxisMax = m_duration;

  if (m_plot2DTimeAxis == NULL) {
    m_plot2DTimeAxis = new Plot2DTimeAxis(m_startDateTime, m_duration);
    m_qwtPlot->setAxisTitle(QwtPlot::xBottom, " Simulation Time");
    m_qwtPlot->setAxisScale(QwtPlot::xBottom, 0, m_duration);
    m_qwtPlot->setAxisScaleDraw(QwtPlot::xBottom, m_plot2DTimeAxis);
    m_qwtPlot->setAxisLabelRotation(QwtPlot::xBottom, -90.0);
    m_qwtPlot->setAxisLabelAlignment(QwtPlot::xBottom, Qt::AlignLeft | Qt::AlignBottom);
  } else {
    m_plot2DTimeAxis->startDateTime(m_startDateTime);
    m_plot2DTimeAxis->duration(m_duration);
  }

  TimeSeriesFloodPlotData::Ptr data = TimeSeriesFloodPlotData::create(tsData);
  floodPlotData(data);
}
void TimeSeriesTest::testConstruction() {

    BOOST_MESSAGE("Testing time series construction...");

    TimeSeries<Real> ts;
    ts[Date(25, March, 2005)] = 1.2;
    ts[Date(29, March, 2005)] = 2.3;
    ts[Date(15, March, 2005)] = 0.3;

    // Entries are kept sorted by date, so the earliest date comes first
    TimeSeries<Real>::const_iterator cur = ts.begin();
    if (cur->first != Date(15, March, 2005)) {
        BOOST_ERROR("date does not match");
    }
    if (cur->second != 0.3) {
        BOOST_ERROR("value does not match");
    }

    ts[Date(15, March, 2005)] = 4.0;
    cur = ts.begin();
    if (cur->second != 4.0) {
        BOOST_ERROR("replaced value does not match: " << cur->second << "\n");
    }

    ts[Date(15, March, 2005)] = 3.5;
    if (cur->second != 3.5) {
        BOOST_ERROR("set value does not match: " << cur->second << "\n");
    }
}
FL::ParseResult AB::analyze(
    const TimeSeries &ts, Forest &forest, Patterns::Matcher &matcher,
    PatternsSet &patterns, MetricsSet &metrics)
{
    ParseResult result;

    try
    {
        if (ts.size() < 2)
            throw EAnalyze(E_INVALID_INPUT);

        Tree *tree = new Tree(ts);

        // Mark each adjacent pair of values as ascending ("a") or descending ("b")
        const int up   = IDGenerator::idOf("a");
        const int down = IDGenerator::idOf("b");

        for (int i = 0; i < ts.size()-1; i += 1)
        {
            const int &id = (ts.value(i) <= ts.value(i+1)) ? up : down;
            tree->add(new Node(NULL, id, i, i+1, 0));
        }

        forest.push_back(tree);
        result.treesAdded = 1;
        result.nodesAdded = (ts.size() + 1) / 2;
    }
    catch (const EAnalyze &e)
    {
        m_lastError = e;
    }

    return result;
}
Datum blocker(const RealMatrix& Ua, const RealMatrix sa, const vGroup& ensemble,
              const uint blocksize, uint repeats, ExtractPolicy& policy)
{
  TimeSeries<double> coverlaps;

  for (uint i=0; i<repeats; ++i) {
    vector<uint> picks = pickFrames(ensemble.size(), blocksize);

    if (debug) {
      cerr << "***Block " << blocksize << ", replica " << i
           << ", picks " << picks.size() << endl;
      dumpPicks(picks);
    }

    vGroup subset = subgroup(ensemble, picks);

    boost::tuple<RealMatrix, RealMatrix> pca_result = pca(subset, policy);
    RealMatrix s = boost::get<0>(pca_result);
    RealMatrix U = boost::get<1>(pca_result);

    if (length_normalize)
      for (uint j=0; j<s.rows(); ++j)
        s[j] /= blocksize;

    coverlaps.push_back(covarianceOverlap(sa, Ua, s, U));
  }

  return Datum(coverlaps.average(), coverlaps.variance(), coverlaps.size());
}
template <typename TimeSeries>
typename TimeSeries::value_type empirical_average(TimeSeries const &t) {
  auto si = t.size();
  if (si == 0) return typename TimeSeries::value_type{};
  auto sum = t[0];
  for (decltype(si) i = 1; i < si; ++i) sum += t[i];
  return sum / t.size();
}
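// Usage sketch for the template above: any container exposing size() and
// operator[] works, e.g. std::vector<double>. The sample values here are
// illustrative only.
#include <iostream>
#include <vector>

void demo_empirical_average()
{
    std::vector<double> samples = {1.0, 2.0, 3.0, 4.0};
    std::cout << empirical_average(samples) << std::endl; // prints 2.5
}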
TimeSeries<double> MLGDao::getSignal( oid_t nodeId, oid_t bottomLayer, oid_t topLayer )
{
#ifdef MLD_SAFE
    if( bottomLayer == Objects::InvalidOID
        || topLayer == Objects::InvalidOID
        || nodeId == Objects::InvalidOID ) {
        LOG(logERROR) << "MLGDao::getSignal: invalid ids";
        return TimeSeries<double>();
    }
#endif
    // Keep overhead as low as possible
    TimeSeries<double> res;
    oid_t layer = bottomLayer;
    type_t oType = m_link->olinkType();
    Value v;

    // Walk up the layer hierarchy, collecting the OLink weight at each layer
    while( layer != topLayer ) {
        oid_t eid = findEdge(oType, layer, nodeId);
        m_g->GetAttribute(eid, m_g->FindAttribute(oType, Attrs::V[OLinkAttr::WEIGHT]), v);
        res.data().push_back(v.GetDouble());
        layer = m_layer->parent(layer);
    }

    // Don't forget the last layer; the range is inclusive
    oid_t eid = findEdge(oType, layer, nodeId);
    m_g->GetAttribute(eid, m_g->FindAttribute(oType, Attrs::V[OLinkAttr::WEIGHT]), v);
    res.data().push_back(v.GetDouble());

    res.clamp();
    return res;
}
/* **********************************************************
 * Evaluate the detection score function of a window of frames
 * *********************************************************** */
int evalKer(Eigen::MatrixXd Ds, TimeSeries TS, Eigen::MatrixXd w, double b,
            int minSegLen, int maxSegLen, int segStride, int d, int sd,
            int featType, double thresh)
{
    int n = Ds.cols();
    double minth = -125.803;
    double maxth = 130.957;

    if ((minSegLen < 1) || (maxSegLen < minSegLen) || (segStride < 1))
        std::cout << "crtFeatWindow: evalKer: invalid option for sOpt" << std::endl;
    if (minSegLen > n)
        std::cout << "crtFeatWindow: evalKer: minimum segment length is greater than the time series length" << std::endl;
    if (maxSegLen > n)
        maxSegLen = n;

    // Time series options
    TS.D = Ds.data();
    TS.n = n;
    if (featType == FEAT_BAG) {
        TS.IntD = new double[d*(n + 1)];
        cmpIntIm(TS.D, d, n, TS.IntD);
    } else if (featType == FEAT_ORDER) {
        TS.sd = sd;
    }
    TS.setSegLst(minSegLen, maxSegLen, segStride);
    TS.updateSegLstVals(w.data(), b);

    double mxVal = -std::numeric_limits<double>::infinity();
    int curIdx = 0, segLstSz = TS.segLst.size();
    for (int t = minSegLen; t < n; t++) {
        if (curIdx < segLstSz) { // there are more segments to consider
            ExEvent curSeg = TS.segLst[curIdx];
            while (curSeg.e <= t) {
                mxVal = curSeg.val;
                // Report a detection if the score exceeds the threshold
                if (mxVal > thresh) {
                    return 0;
                }
                curIdx++;
                if (curIdx >= segLstSz) break;
                curSeg = TS.segLst[curIdx];
            }
        }
    }
    // No event was found
    return -1;
}
TEST(StatisticsTest, Single)
{
  TimeSeries<double> timeseries;

  timeseries.set(0);

  EXPECT_NONE(Statistics<double>::from(timeseries));
}
binned_series(TimeSeries const& t, int bin_size_)
   : bin_size(bin_size_), binned(t.size() / bin_size_, value_type{}) {
  if (bin_size_ > t.size())
    TRIQS_RUNTIME_ERROR << "bin size (" << bin_size_ << ") cannot be larger than size ("
                        << t.size() << ") of time series";
  // Average each block of bin_size consecutive samples into one bin
  for (int i = 0; i < size(); i++) {
    for (int j = 0; j < bin_size; j++) binned[i] += t[i * bin_size + j];
    binned[i] /= bin_size;
  }
}
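// Hedged sketch of the same binning scheme as a free function over
// std::vector<double>: bin averages of a correlated series approach
// independent samples once the bin size exceeds the correlation time,
// which is what makes a binning analysis of the error possible.
#include <vector>

std::vector<double> binSeries(const std::vector<double>& t, int bin_size)
{
    std::vector<double> binned(t.size() / bin_size, 0.0);
    for (std::size_t i = 0; i < binned.size(); ++i) {
        for (int j = 0; j < bin_size; ++j) binned[i] += t[i * bin_size + j];
        binned[i] /= bin_size; // each bin holds the mean of its block
    }
    return binned;
}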
template <typename TimeSeries>
typename TimeSeries::value_type empirical_variance(TimeSeries const &t) {
  auto si = t.size();
  if (si == 0) return typename TimeSeries::value_type{};
  auto avg = empirical_average(t);
  // Initialize from the first sample so this also works when t[0] is an
  // array type without a trivial constructor
  decltype(avg) sum2 = (t[0] - avg) * (t[0] - avg);
  for (decltype(si) i = 1; i < si; ++i) sum2 += (t[i] - avg) * (t[i] - avg);
  return sum2 / t.size();
}
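// Usage sketch combining the two templates above (an assumption about how
// they are meant to be used together, not project code): for uncorrelated
// scalar samples, the naive standard error of the mean is sqrt(variance / N).
#include <cmath>
#include <vector>

double naiveStdError(const std::vector<double>& t)
{
    return std::sqrt(empirical_variance(t) / t.size());
}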
void BFFindMotif::FindMotifSub(std::deque<Point> &window)
{
    size_t i = 0;
    // Guard against unsigned underflow in the loop bound when the window
    // is shorter than the motif length
    if(window.size() < m_MotifLength)
    {
        return;
    }
    for(i = 0; i < window.size() - m_MotifLength; ++i)
    {
        double distance = 0.0;
        bool newMotif = true;
        TimeSeries ts;
        ts.reserve(m_MotifLength);
        if(m_SlideWindow.size() >= m_MotifLength) // Only process slide windows at least as long as the motif
        {
            // Get time series
            for(size_t j = i; j < i + m_MotifLength; ++j)
            {
                ts.push_back(window[j].second);
            }
            // Compare with candidate motifs
            for(size_t j = 0; j < m_CandidateMotif.size(); ++j)
            {
                distance = EuclideanDistance(m_CandidateMotif[j].second, ts);
                if((2 * m_Radius > distance) && (m_Radius < distance)) // Neither a new motif nor a similar motif
                {
                    newMotif = false;
                }
                else if(m_Radius > distance) // Similar motif
                {
                    m_CandidateMotif[j].first++;
                    i += m_Step;
                    newMotif = false;
                    break; // Impossible to be similar to other candidates
                }
            }
            if(true == newMotif) // New motif
            {
                m_CandidateMotif.push_back(std::make_pair(1LL, ts));
                i += m_Step;
            }
        }
        else
        {
            cerr << "Window size:" << m_SlideWindow.size() << endl;
        }
    }
    // Discard the processed prefix of the window
    for(size_t k = 0; k < i; ++k)
    {
        window.pop_front();
    }
}
std::vector<Real> IntervalPrice::extractValues(
    const TimeSeries<IntervalPrice>& ts, IntervalPrice::Type t) {
    std::vector<Real> returnval;
    returnval.reserve(ts.size());
    for (TimeSeries<IntervalPrice>::const_iterator i = ts.begin();
         i != ts.end(); ++i) {
        returnval.push_back(i->second.value(t));
    }
    return returnval;
}
// Compute the Hamming distance between two time series
size_t NTuple::HammingDistance( TimeSeries& t1, TimeSeries& t2 )
{
    size_t h = 0;
    TimeSeriesIter iter1 = t1.begin();
    TimeSeriesIter iter2 = t2.begin();
    while( iter1 != t1.end() && iter2 != t2.end() )
    {
        // Accumulate the Hamming distance contributed by each NTuple pair
        h += *iter1++ - *iter2++;
    }
    return h;
}
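// Hedged sketch: the accumulation above relies on NTuple's operator-
// returning the per-tuple Hamming distance (NTuple itself is not shown
// here). Element-wise over two tuples, that count would be:
#include <cstddef>
#include <vector>

std::size_t tupleHamming(const std::vector<int>& a, const std::vector<int>& b)
{
    std::size_t h = 0;
    for (std::size_t i = 0; i < a.size() && i < b.size(); ++i)
        h += (a[i] != b[i]); // count positions that differ
    return h;
}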
void Distance::calcCost(const TimeSeries &ts1, const TimeSeries &ts2,
                        double *table_d, double *table_g,
                        int len1, int len2)
{
    // Fill the pairwise cost matrix; table_g is populated later by calcGamma
    for (int i = 0; i < len1; i++) {
        RandomVariable r1 = ts1.at(i);
        for (int j = 0; j < len2; j++) {
            RandomVariable r2 = ts2.at(j);
            table_d[i * len2 + j] = difference(r1, r2);
        }
    }
}
double AnalyticSignal::calculate(const TimeSeries& ts, int modeNo, const string& prefix)
{
    const vector<double>& xs = ts.getXs();
    const vector<double>& realSignal = ts.getYs();
    unsigned n = realSignal.size();
    double xStep = xs[1] - xs[0]; // Assuming even sampling

    fftw_complex* conjugatedSignal = (fftw_complex*) malloc(sizeof(fftw_complex) * n);
    for (unsigned i = 0; i < n; i++) {
        conjugatedSignal[i][0] = realSignal[i];
        conjugatedSignal[i][1] = 0;
    }
    // Forward transform, then inverse transform with a pi/2 phase shift;
    // the real part of the result serves as the quadrature component v
    fft(true, realSignal.size(), conjugatedSignal, 0);
    fft(false, realSignal.size(), conjugatedSignal, M_PI_2);

    ofstream imfStream(prefix + "_imf_" + to_string(modeNo) + ".csv");
    ofstream ampStream(prefix + "_amp_" + to_string(modeNo) + ".csv");
    ofstream freqStream(prefix + "_freq_" + to_string(modeNo) + ".csv");
    ofstream perStream(prefix + "_per_" + to_string(modeNo) + ".csv");

    double totalEnergy = 0;
    double prevZeroCross = xs[0];
    for (unsigned i = 0; i < n; i++) {
        double x = xs[i];
        double u = realSignal[i];
        double v = conjugatedSignal[i][0] / n; // the fft is unnormalized
        double u2v2 = u * u + v * v;
        double amplitude = sqrt(u2v2);
        totalEnergy += u2v2;
        imfStream << x << " " << u << endl;
        ampStream << x << " " << amplitude << endl;
        if (i > 0 && i < n - 1) {
            double frequency = (getRealTangent(i, conjugatedSignal) * u / n
                                - getTangent(i, realSignal) * v) / u2v2 / xStep;
            freqStream << x << " " << frequency << endl;
        }
        // Estimate the average period from zero-crossings (not strictly part
        // of the analytic signal calculation)
        if (i > 0) {
            if ((u > 0 && realSignal[i - 1] < 0) || (u < 0 && realSignal[i - 1] > 0)) {
                double zeroCross = (x + xs[i - 1]) / 2;
                if (prevZeroCross > xs[0]) {
                    perStream << (zeroCross + prevZeroCross) / 2 << " "
                              << 2 * (zeroCross - prevZeroCross) << endl;
                }
                prevZeroCross = zeroCross;
            }
        }
    }
    imfStream.close();
    ampStream.close();
    freqStream.close();
    perStream.close();

    free(conjugatedSignal); // release the FFT buffer
    return totalEnergy / n;
}
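// The frequency expression above is the derivative of the instantaneous
// phase phi = atan(v / u) of the analytic signal u + i*v:
//
//   d(phi)/dt = (v'*u - u'*v) / (u^2 + v^2)
//
// The 1/n factors undo the unnormalized FFT, and the final 1/xStep converts
// the per-sample derivative into a per-unit-time frequency.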
mObject patternsToJSON(void)
{
    LoadPatternIter &thePatterns = theDomain.getLoadPatterns();
    LoadPattern *thePattern;
    TimeSeries *theSeries;
    NodalLoadIter *nli;
    NodalLoad *nload;
    const Vector *load_vec;
    // TODO:
    // ElementalLoadIter *eli;
    // SP_ConstraintIter *spci;

    mObject patterns, pattern, nloads;
    mArray arr;
    mValue tmp, tmp2, tmp3;
    char tag_str[15];
    int i, size;

    patterns.clear();
    while ((thePattern = thePatterns()) != 0) {
        pattern.clear();

        // TODO:
        tmp = thePattern->getClassType();
        pattern["type"] = tmp;

        theSeries = thePattern->getTimeSeries();
        tmp2 = theSeries->getTag();
        sprintf(tag_str, "%d", tmp2.get_int());
        pattern["tsTag"] = tag_str;

        nli = &(thePattern->getNodalLoads());
        nloads.clear();
        while ((nload = (*nli)()) != 0) {
            tmp2 = nload->getNodeTag();
            sprintf(tag_str, "%d", tmp2.get_int());
            load_vec = nload->getLoadValue();
            size = load_vec->Size();
            arr.clear();
            for (i = 0; i < size; i++) {
                tmp3 = (*load_vec)(i);
                arr.push_back(tmp3);
            }
            nloads[tag_str] = arr;
        }
        pattern["nodalLoads"] = nloads;

        tmp2 = thePattern->getTag();
        sprintf(tag_str, "%d", tmp2.get_int());
        patterns[tag_str] = pattern;
    }
    return patterns;
}
void GARCHTest::testCalculation() {

    BOOST_TEST_MESSAGE("Testing GARCH model calculation...");

    Date d(7, July, 1962);
    TimeSeries<Volatility> ts;
    Garch11 garch(0.2, 0.3, 0.4);

    // Ten daily observations with a constant value of 0.1
    Volatility r = 0.1;
    for (std::size_t i = 0; i < 10; ++i, d += 1) {
        ts[d] = r;
    }

    TimeSeries<Volatility> tsout = garch.calculate(ts);
    std::for_each(tsout.cbegin(), tsout.cend(), check_ts);
}
TEST(TimeSeriesTest, Set)
{
  TimeSeries<int> series;
  ASSERT_TRUE(series.empty());

  series.set(1);
  ASSERT_FALSE(series.empty());

  const Option<TimeSeries<int>::Value> latest = series.latest();
  ASSERT_SOME(latest);
  ASSERT_EQ(1, latest.get().data);
}
double Distance::DTW(const TimeSeries &ts1, const TimeSeries &ts2)
{
    int len1 = ts1.length();
    int len2 = ts2.length();

    double *table_d = new double[len1 * len2]; // pairwise costs
    double *table_g = new double[len1 * len2]; // accumulated costs

    calcCost(ts1, ts2, table_d, table_g, len1, len2);
    calcGamma(table_d, table_g, len1, len2);
    double dist = calcSum(table_d, table_g, len1, len2);

    delete[] table_d;
    delete[] table_g;
    return dist;
}
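// calcGamma is called above but not shown in this excerpt. A standard DTW
// cost-accumulation step would look like the following sketch (an assumed
// implementation, not the project's own):
#include <algorithm>

void calcGammaSketch(const double* table_d, double* table_g, int len1, int len2)
{
    for (int i = 0; i < len1; ++i) {
        for (int j = 0; j < len2; ++j) {
            double best = 0.0;                                         // start cell
            if (i == 0 && j > 0)       best = table_g[j - 1];          // first row
            else if (j == 0 && i > 0)  best = table_g[(i - 1) * len2]; // first column
            else if (i > 0 && j > 0)
                best = std::min({table_g[(i - 1) * len2 + j],          // step in ts1
                                 table_g[i * len2 + j - 1],            // step in ts2
                                 table_g[(i - 1) * len2 + j - 1]});    // diagonal
            table_g[i * len2 + j] = table_d[i * len2 + j] + best;
        }
    }
}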
std::vector<int> Distance::topK(const TimeSeries &ts, int k)
{
    const TimeSeriesCollection *db = this->collection;

    // Truncate to the shortest sequence length in the collection
    int ts_length = ts.length();
    for (size_t i = 0; i < db->sequences.size(); i++) {
        ts_length = std::min(ts_length, (int)db->sequences[i].length());
    }

    // Get distances
    std::vector<std::pair<int, float> > pairs;
    for (size_t i = 0; i < db->sequences.size(); i++) {
        float d = this->distance(ts, db->sequences[i], ts_length);
        pairs.push_back(std::make_pair((int)i, d));
    }

    // Sort by distance
    std::sort(pairs.begin(), pairs.end(), comp);

    // Return the indices of the top k matches
    std::vector<int> ret;
    for (int i = 0; i < k; i++) {
        ret.push_back(pairs[i].first);
    }
    return ret;
}
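// `comp` is referenced above but not defined in this excerpt; a comparator
// ordering the (index, distance) pairs by ascending distance would be
// (assumed, matching the sort-by-distance intent):
#include <utility>

bool compSketch(const std::pair<int, float>& a, const std::pair<int, float>& b)
{
    return a.second < b.second; // smaller distance ranks first
}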
TimeSeries<Real> IntervalPrice::extractComponent(
    const TimeSeries<IntervalPrice>& ts, IntervalPrice::Type t) {
    std::vector<Date> dates = ts.dates();
    std::vector<Real> values = extractValues(ts, t);
    return TimeSeries<Real>(dates.begin(), dates.end(), values.begin());
}
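// Usage sketch (with a hypothetical `priceHistory` series): QuantLib's
// IntervalPrice::Type enumerates Open, Close, High, and Low, so the closing
// prices can be pulled out as a plain TimeSeries<Real>:
//
//   TimeSeries<Real> closes =
//       IntervalPrice::extractComponent(priceHistory, IntervalPrice::Close);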
list<int> toList(const TimeSeries<int>& series)
{
  list<int> result;
  foreach (const TimeSeries<int>::Value& value, series.get()) {
    result.push_back(value.data);
  }
  return result;
}
// Iterate from an initial state with the k'th function knocked out
void DynSysModel::KoIterate( NTuple& initial_state, size_t length, TimeSeries& result, size_t kov )
{
    // Make sure the result is empty
    result.clear();

    // Forcing the k'th entry to zero in the initial state is no longer
    // needed; it is corrected when the file is read in.
    // NTuple state1 = initial_state;
    // state1.Reset( kov );

    // Save the initial state as the first time series entry
    result.push_back( initial_state );
    mState = initial_state;

    NTuple next_state;
    PolyModelIter iter;
    size_t i, k;
    for( i = length - 1; i > 0; --i )
    {
        k = 1;
        // For each polynomial
        next_state.Reset();
        iter = mModel.begin();
        while( iter != mModel.end() )
        {
            // Force the knocked-out function's result to zero
            if( k == kov )
            {
                next_state.Assign( k, 0 );
            }
            else
            {
                // Evaluate the k'th polynomial at the current state
                // to produce a new value for the k'th variable
                next_state.Assign( k, mModel[k-1].Evaluate( mState ) );
            }
            // next polynomial
            ++iter;
            ++k;
        }
        // Update the current state to be the newly computed state
        mState = next_state;
        result.push_back( next_state );
    }
}
MOTIF_BEGIN

inline double EuclideanDistance(TimeSeries &t1, TimeSeries &t2, string name)
{
    double dist = 0.0;
    if(t1.size() != t2.size())
    {
        cout << t1.size() << "," << t2.size() << endl;
        cout << "Called by " << name << endl;
        // Mismatched lengths would read past the end of the shorter series;
        // report an infinite distance instead of continuing
        return numeric_limits<double>::infinity();
    }
    for(size_t i = 0; i < t1.size(); ++i)
    {
        dist += pow(t1[i] - t2[i], 2.0);
    }
    return sqrt(dist);
}
//---------------------------------------------------------------------------
void getMeanAndStdErrorTimeSeries(const std::vector<TimeSeries>& timeSeries,
                                  TimeSeries& mean, TimeSeries& stdError)
{
    assert(mean.isNull()==true);
    mean = timeSeries[0];
    const unsigned int nTimeSeries = timeSeries.size();
    // Accumulate the element-wise sum of all the series
    for (unsigned i=1; i<nTimeSeries; ++i)
    {
        mean += timeSeries[i];
    }
    mean /= nTimeSeries;
    // Note: stdError is accepted but never populated here; computing it
    // would need a second pass over timeSeries against the mean.
}
TimeSeries<Volatility> ConstantEstimator::calculate(
    const TimeSeries<Volatility>& volatilitySeries)
{
    TimeSeries<Volatility> retval;
    const std::vector<Volatility> u = volatilitySeries.values();
    TimeSeries<Volatility>::const_iterator cur = volatilitySeries.begin();
    std::advance(cur, size_);
    // ICK. This could probably be made a lot more efficient
    for (Size i = size_; i < volatilitySeries.size(); i++) {
        Size j;
        Real sumu2 = 0.0, sumu = 0.0;
        // Rolling sums over the previous size_ observations
        for (j = i - size_; j < i; j++) {
            sumu += u[j];
            sumu2 += u[j]*u[j];
        }
        Real s = std::sqrt(sumu2/(Real)size_ -
                           sumu*sumu / (Real)size_ / (Real)(size_+1));
        retval[cur->first] = s;
        ++cur;
    }
    return retval;
}
TimeSeriesLinePlotData::TimeSeriesLinePlotData(TimeSeries timeSeries, double fracDaysOffset)
  : m_timeSeries(timeSeries),
    m_minX(timeSeries.firstReportDateTime().date().dayOfYear()
           + timeSeries.firstReportDateTime().time().totalDays()),
    m_maxX(timeSeries.daysFromFirstReport()[timeSeries.daysFromFirstReport().size()-1]
           + timeSeries.firstReportDateTime().date().dayOfYear()
           + timeSeries.firstReportDateTime().time().totalDays()), // end day
    m_minY(minimum(timeSeries.values())),
    m_maxY(maximum(timeSeries.values())),
    m_size(timeSeries.values().size())
{
  m_boundingRect = QwtDoubleRect(m_minX, m_minY, (m_maxX - m_minX), (m_maxY - m_minY));
  m_minValue = m_minY;
  m_maxValue = m_maxY;
  m_units = timeSeries.units();
  m_fracDaysOffset = fracDaysOffset; // note: updating in xValue does not affect the scaled axis
  m_x = m_timeSeries.daysFromFirstReport();
  m_y = m_timeSeries.values();
}