Example #1
/**
 * @brief Fetches chat message counts for each day from the database.
 * @param friendPk Friend public key to fetch.
 * @param from Start of period to fetch.
 * @param to End of period to fetch.
 * @return List of structs containing days offset and message count for that day.
 */
QList<History::DateMessages> History::getChatHistoryCounts(const ToxPk& friendPk, const QDate& from,
                                                           const QDate& to)
{
    if (!isValid()) {
        return {};
    }
    QDateTime fromTime(from);
    QDateTime toTime(to);

    QList<DateMessages> counts;

    auto rowCallback = [&counts](const QVector<QVariant>& row) {
        DateMessages app;
        app.count = row[0].toUInt();
        app.offsetDays = row[1].toUInt();
        counts.append(app);
    };

    QString queryText =
        QString("SELECT COUNT(history.id), ((timestamp / 1000 / 60 / 60 / 24) - %4 ) AS day "
                "FROM history "
                "JOIN peers chat ON chat_id = chat.id "
                "WHERE timestamp BETWEEN %1 AND %2 AND chat.public_key='%3'"
                "GROUP BY day;")
            .arg(fromTime.toMSecsSinceEpoch())
            .arg(toTime.toMSecsSinceEpoch())
            .arg(friendPk.toString())
            .arg(QDateTime::fromMSecsSinceEpoch(0).daysTo(fromTime));

    db->execNow({queryText, rowCallback});

    return counts;
}
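A minimal usage sketch (not part of the original source), assuming only what the snippet itself shows: a History::DateMessages struct carrying offsetDays and count. The history object, key, and date range here are hypothetical placeholders.

// Hypothetical caller: print per-day message activity for one friend.
// Requires <QDebug>; offsetDays is relative to `from`, as computed by the query above.
void printActivity(History& history, const ToxPk& friendPk)
{
    const QDate from(2020, 1, 1);   // arbitrary example range
    const QDate to(2020, 1, 31);
    const QList<History::DateMessages> counts = history.getChatHistoryCounts(friendPk, from, to);
    for (const History::DateMessages& day : counts) {
        qDebug() << from.addDays(day.offsetDays) << ":" << day.count << "messages";
    }
}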
Example #2
AbsTime AbsTime::now() {
    struct timespec ts;
    ::clock_gettime(CLOCK_REALTIME, &ts);
    AbsTime time_now;
    time_now.timepoint = toTime(ts).nanosecs;
    return time_now;
}
time_t parseTimeString( const std::string& stddatestr )
{
    const char* datestr = stddatestr.c_str();
    const char* eos     = datestr + strlen( datestr );

    typedef std::list<std::string> formats_t;
    formats_t formats;
    
    formats.push_back( "%Y-%m-%dT%H:%M:%S" );
    formats.push_back( "%y %b %d %H:%M:%S" );
    formats.push_back( "%y %b %d %H:%M" );

    for( formats_t::iterator iter = formats.begin(); iter != formats.end(); ++iter )
    {
        std::string format = *iter;
        struct tm tm;
        memset( &tm, 0, sizeof(struct tm));
        const char* rc = UT_strptime( datestr, format.c_str(), &tm );
        if( rc == eos )
        {
//            UT_DEBUGMSG(("parseTimeString(OK) input:%s format:%s ret:%ld\n",
//                         datestr, format.c_str(), toTime(&tm) ));
            return toTime(&tm);
        }
    }

//    UT_DEBUGMSG(("parseTimeString(f) input:%s\n", datestr ));
    return 0;
}
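toTime(struct tm*) is not included in the snippet; a plausible sketch, assuming it simply converts the broken-down time with mktime (which interprets the fields as local time). Note that the loop above only accepts a format when UT_strptime consumed the whole input (rc == eos).

#include <ctime>

// Hypothetical helper assumed by parseTimeString() above: convert a
// broken-down time to time_t. mktime() treats the struct as local time;
// timegm() would be the choice if the input strings are known to be UTC.
static time_t toTime(struct tm* tm)
{
    return mktime(tm);
}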
int main() {
#ifndef ONLINE_JUDGE
    std::ifstream fis("AverageSpeed.in");
    std::cin.rdbuf(fis.rdbuf());
#endif   
    std::string line;
    double currentSpeed = 0.0;
    size_t lastTime = 0;
    size_t totalTimeTraveled = 0;
    double distance = 0.0; 
    while (std::getline(std::cin, line)) {
        std::istringstream iss(line);
        std::string time; 
        std::string speed;
        iss >> time >> speed;
        size_t seconds = toSeconds(time);
        size_t delta = seconds - lastTime;
        lastTime = seconds;
        totalTimeTraveled += delta;
        distance += static_cast<double>(delta) * currentSpeed;
        
        if (!speed.empty()) {
            currentSpeed = std::stod(speed) / 3600.00;
        } else {
            std::cout << toTime(totalTimeTraveled) << ' ' << std::fixed << std::setprecision(2) << distance << " km" << std::endl;
        }
    }
    return 0;
}
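toSeconds and toTime are referenced above but not shown. A sketch of what they might look like, assuming hh:mm:ss timestamps; the exact format and helper signatures are assumptions.

#include <cstdio>
#include <string>

// Parse an "hh:mm:ss" string into seconds since midnight (assumed format).
static size_t toSeconds(const std::string& hms)
{
    unsigned h = 0, m = 0, s = 0;
    std::sscanf(hms.c_str(), "%u:%u:%u", &h, &m, &s);
    return static_cast<size_t>(h) * 3600 + m * 60 + s;
}

// Format seconds since midnight back into "hh:mm:ss" (assumed format).
static std::string toTime(size_t seconds)
{
    char buf[16];
    std::snprintf(buf, sizeof(buf), "%02zu:%02zu:%02zu",
                  seconds / 3600, (seconds / 60) % 60, seconds % 60);
    return buf;
}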
static void writeCsvRow(SOCKET fd, struct Data* row){
	char date[11];
	toDate(date, row->ts - row->dr);

	// This is the time marking the start of the interval
	char timeFrom[9];
	toTime(timeFrom, row->ts - row->dr);

	// This is the time marking the end of the interval
	char timeTo[9];
	toTime(timeTo, row->ts);

	char rowTxt[256];
	sprintf(rowTxt, "%s,%s,%s,%llu,%llu,%s,%s\n", date, timeFrom, timeTo, row->dl, row->ul, (row->hs == NULL) ? "" : row->hs, row->ad);	
	writeText(fd, rowTxt);
	
	freeData(row);
}
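The 11- and 9-byte buffers suggest toDate writes "YYYY-MM-DD" and toTime writes "HH:MM:SS", each NUL-terminated. A hedged sketch of such helpers using strftime; whether the original uses local time or UTC, and the exact formats, are assumptions.

#include <time.h>

// Hypothetical helpers matching the buffer sizes used in writeCsvRow() above.
static void toDate(char* buf, time_t ts)
{
	struct tm* t = localtime(&ts);       // assumption: local time
	strftime(buf, 11, "%Y-%m-%d", t);    // "YYYY-MM-DD" + NUL
}

static void toTime(char* buf, time_t ts)
{
	struct tm* t = localtime(&ts);
	strftime(buf, 9, "%H:%M:%S", t);     // "HH:MM:SS" + NUL
}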
Example #6
 void dfs(int p, int k, vector<bool>& path, vector<string>& ans) {
     int n = path.size();
     if(!isValid(path)) return;
     if(k == 0 || p == n) {
         if(k == 0) {
             ans.push_back(toTime(path));
         }
         return;
     }
     for(int i = p; i < n; i++) {
         path[i] = true;
         dfs(i+1, k-1, path, ans);
         path[i] = false;
     }
 }
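isValid and toTime(path) are not shown; the dfs above matches the common Binary Watch pattern. A sketch of the missing helpers, assuming a 10-entry path where the first 4 bits encode the hour and the last 6 encode the minute (that layout is an assumption).

#include <string>
#include <utility>
#include <vector>
using namespace std;

// Assumed layout: path[0..3] are hour bits, path[4..9] are minute bits.
static pair<int, int> decode(const vector<bool>& path) {
    int hours = 0, minutes = 0;
    for (int i = 0; i < 4; i++)  hours   = (hours   << 1) | path[i];
    for (int i = 4; i < 10; i++) minutes = (minutes << 1) | path[i];
    return {hours, minutes};
}

static bool isValid(const vector<bool>& path) {
    pair<int, int> hm = decode(path);
    return hm.first < 12 && hm.second < 60;
}

static string toTime(const vector<bool>& path) {
    pair<int, int> hm = decode(path);
    return to_string(hm.first) + ":" + (hm.second < 10 ? "0" : "") + to_string(hm.second);
}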
// here we use DateTime instead of Qt's built-in time conversion
// since the latter does not properly handle fractions of seconds
// as allowed by the ISO 8601 specification.
QString Soprano::LiteralValue::toString() const
{
    if ( d ) {
        if ( !d->stringCacheValid ) {
            if( isInt() )
                d->stringCache = QString::number( toInt() );
            else if( isInt64() )
                d->stringCache = QString::number( toInt64() );
            else if( isUnsignedInt() )
                d->stringCache = QString::number( toUnsignedInt() );
            else if( isUnsignedInt64() )
                d->stringCache = QString::number( toUnsignedInt64() );
            else if( isBool() )
                d->stringCache = ( toBool() ? QString("true") : QString("false" ) );
            else if( isDouble() ) // FIXME: decide on a proper double encoding or check if there is one in xml schema
                d->stringCache = QString::number( toDouble(), 'e', 10 );
            else if( isDate() )
                d->stringCache = DateTime::toString( toDate() );
            else if( isTime() )
                d->stringCache = DateTime::toString( toTime() );
            else if( isDateTime() )
                d->stringCache = DateTime::toString( toDateTime() );
            else if ( isByteArray() )
                d->stringCache = QString::fromAscii( toByteArray().toBase64() );
            else
                d->stringCache = d->value.toString();

            d->stringCacheValid = true;
        }

        return d->stringCache;
    }
    else {
        return QString();
    }
}
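A small illustration of the fractional-seconds issue mentioned in the comment at the top of this snippet (a sketch, not Soprano code): Qt's Qt::ISODate conversion drops a QTime's milliseconds, while ISO 8601 permits fractional seconds.

#include <QString>
#include <QTime>

QString isoTimeExample()
{
    QTime t(13, 37, 42, 500);                    // 13:37:42.500
    QString viaQt = t.toString(Qt::ISODate);     // "13:37:42" - fraction lost
    QString withMs = t.toString("HH:mm:ss.zzz"); // "13:37:42.500" - kept by an explicit format
    return viaQt + " vs " + withMs;
}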
Example #8
TEST(Test, History)
{
    /*
     * Create a pub and sub in different participants,
     * ensure we can send a message between them.
     */

    // eprosima::Log::setVerbosity(eprosima::VERB_ERROR);

    // histSmall is a history depth we can overflow quickly
    // histLarge is a history that we don't expect to overflow
    static constexpr int histSmall = 5;
    static constexpr int histLarge = 1000;

    for (ReliabilityQosPolicyKind qos : {BEST_EFFORT_RELIABILITY_QOS, RELIABLE_RELIABILITY_QOS}) {
        for (int pubHist : {histSmall, histLarge}) {
            for (int subHist : {histSmall, histLarge}) {

                std::cout << "qos=" << qos << " pubHist=" << pubHist << " subHist=" << subHist
                          << std::endl;

                // create publisher partition
                ParticipantAttributes pubPartAttr;
                pubPartAttr.rtps.builtin.use_SIMPLE_RTPSParticipantDiscoveryProtocol = true;
                pubPartAttr.rtps.builtin.use_SIMPLE_EndpointDiscoveryProtocol = true;
                pubPartAttr.rtps.builtin.m_simpleEDP.use_PublicationReaderANDSubscriptionWriter =
                    true;
                pubPartAttr.rtps.builtin.m_simpleEDP.use_PublicationWriterANDSubscriptionReader =
                    true;
                pubPartAttr.rtps.builtin.domainId = domainId;
                pubPartAttr.rtps.sendSocketBufferSize = socketBufSize;
                pubPartAttr.rtps.listenSocketBufferSize = socketBufSize;
                pubPartAttr.rtps.setName("TestPublisher");
                Participant *pubPart = Domain::createParticipant(pubPartAttr);
                ASSERT_NE(pubPart, nullptr);

                // register publisher data type
                Domain::registerType(pubPart, &topicDataType);

                // create publisher listener
                TestPublisherListener pubList;

                // create publisher
                PublisherAttributes pubAttr;
                pubAttr.topic.topicKind = NO_KEY;
                pubAttr.topic.topicName = TopicData::topicName;
                pubAttr.topic.topicDataType = TopicData::topicType;
                pubAttr.topic.historyQos.kind = KEEP_LAST_HISTORY_QOS;
                pubAttr.topic.historyQos.depth = pubHist;
                // Publisher's max_samples has to be at least 2 more than its
                // history depth; there is something in the way it allocates a
                // cache entry, then checks to see if history is full, where the
                // allocate can fail and the history clean is never attempted.
                // +1 is not enough; +2 seems to be okay.
                pubAttr.topic.resourceLimitsQos.max_samples = pubHist + 2;
                pubAttr.topic.resourceLimitsQos.allocated_samples = pubHist + 2;
                pubAttr.times.heartbeatPeriod = toTime(0.100);
                pubAttr.times.nackResponseDelay = toTime(0.010);
                pubAttr.qos.m_reliability.kind = qos;
                Publisher *pub = Domain::createPublisher(pubPart, pubAttr, &pubList);
                ASSERT_NE(pub, nullptr);

                // create subscriber partition
                ParticipantAttributes subPartAttr;
                subPartAttr.rtps.builtin.use_SIMPLE_RTPSParticipantDiscoveryProtocol = true;
                subPartAttr.rtps.builtin.use_SIMPLE_EndpointDiscoveryProtocol = true;
                subPartAttr.rtps.builtin.m_simpleEDP.use_PublicationReaderANDSubscriptionWriter =
                    true;
                subPartAttr.rtps.builtin.m_simpleEDP.use_PublicationWriterANDSubscriptionReader =
                    true;
                subPartAttr.rtps.builtin.domainId = domainId;
                subPartAttr.rtps.sendSocketBufferSize = socketBufSize;
                subPartAttr.rtps.listenSocketBufferSize = socketBufSize;
                subPartAttr.rtps.setName("TestSubscriber");
                Participant *subPart = Domain::createParticipant(subPartAttr);
                ASSERT_NE(subPart, nullptr);

                // register subscriber data type
                Domain::registerType(subPart, &topicDataType);

                // create subscriber listener
                TestSubscriberListener subList;

                // create subscriber
                SubscriberAttributes subAttr;
                subAttr.topic.topicKind = NO_KEY;
                subAttr.topic.topicName = TopicData::topicName;
                subAttr.topic.topicDataType = TopicData::topicType;
                subAttr.topic.historyQos.kind = KEEP_LAST_HISTORY_QOS;
                subAttr.topic.historyQos.depth = subHist;
                subAttr.topic.resourceLimitsQos.max_samples = subHist + 2;
                subAttr.topic.resourceLimitsQos.allocated_samples = subHist + 2;
                subAttr.times.heartbeatResponseDelay = toTime(0.010);
                subAttr.qos.m_reliability.kind = qos;
                Subscriber *sub = Domain::createSubscriber(subPart, subAttr, &subList);
                ASSERT_NE(sub, nullptr);

                // should match very quickly - typical is on the third loop (after 2 sleeps)
                int matchLoops;
                for (matchLoops = 0; matchLoops < 100; matchLoops++) {
                    if (pubList.matched == 1 && subList.matched == 1)
                        break;
                    std::this_thread::sleep_for(std::chrono::milliseconds(1));
                }
                ASSERT_EQ(pubList.matched, 1);
                ASSERT_EQ(subList.matched, 1);
                // typical here is "matched in 2 msec" (occasionally 1 msec)
                // std::cout << "matched in " << matchLoops << " msec" << std::endl;

                // Fast-RTPS might have a bug where if it generates a sequence
                // number set that spans more than 256 sequence numbers, it
                // fails an assert instead of doing whatever RTPS says it
                // should do. With the small socket buffers, about 100 messages
                // is enough to (usually) trigger resends on reliable streams.
                // With release builds and msgCount=100, the acknack asks for
                // ~40..50 to be resent. This shouldn't be more than 255 in
                // order to avoid the assert.
                int msgCount = 250;

                // subTimeout should be long enough for pub to send another heartbeat
                // and sub to request more messages
                TestClock::duration subTimeout = std::chrono::milliseconds(200);

                // evenly spaced, should not miss any
                // cout << "smooth..." << endl;
                subList.sequenceNumbersSeen.clear();
                TestClock::duration msgInterval = std::chrono::milliseconds(1);
                TestClock::time_point msgTime = TestClock::now();
                msgTime += msgInterval;
                for (int i = 0; i < msgCount; i++) {
                    std::this_thread::sleep_until(msgTime);
                    TopicData pubData;
                    EXPECT_TRUE(pub->write(&pubData));
                    TopicData subData;
                    SampleInfo_t sampleInfo;
                    EXPECT_TRUE(subList.takeWithTimeout(subData, sampleInfo, subTimeout));
                    msgTime += msgInterval;
                }

                // burst-send to overrun history - might drop some, but should recover
                // cout << "bursty..." << endl;
                int pubCount = 0;
                for (int i = 0; i < msgCount; i++) {
                    TopicData pubData;
                    if (pub->write(&pubData))
                        pubCount++;
                }
                // should always have sent them all
                EXPECT_GE(pubCount, msgCount);
                // std::cout << "pubCount=" << pubCount << std::endl;
                // see what subscriber got
                // XXX is it possible that once we start pulling messages from
                // the subscriber, it might ask the publisher for more?
                subList.sequenceNumbersSeen.clear();
                int subCount = 0;
                TopicData subData;
                SampleInfo_t sampleInfo;
                while (subList.takeWithTimeout(subData, sampleInfo, subTimeout)) {
                    subCount++;
                }
                // should always have received some
                EXPECT_GT(subCount, 0);
                // How many depends on qos and history depth. The test
                // at this point just makes sure we got _some_; that the
                // system continues working in the face of overruns.
                // std::cout << "subCount=" << subCount << std::endl;
                // subList.showSequenceNumbersSeen();
                if (qos == RELIABLE_RELIABILITY_QOS && msgCount < subHist && msgCount < pubHist) {
                    // no excuse for not getting them all
                    EXPECT_EQ(subCount, msgCount);
                }

                // delete subscriber
                ASSERT_TRUE(Domain::removeSubscriber(sub));
                // delete subscriber participant
                ASSERT_TRUE(Domain::removeParticipant(subPart));

                // delete publisher
                ASSERT_TRUE(Domain::removePublisher(pub));
                // delete publisher participant
                ASSERT_TRUE(Domain::removeParticipant(pubPart));

            } // for (subHist...)
        }     // for (pubHist...)
    }         // for (qos...)
}
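The toTime(0.100) helper used for the heartbeat and delay settings above converts a duration in seconds to the RTPS split-time representation. A sketch of such a conversion, using a stand-in struct (whole seconds plus a fraction in units of 1/2^32 s) rather than the library's own header.

#include <cstdint>

// Stand-in for the RTPS duration type (hypothetical local definition).
struct RtpsTime {
    int32_t seconds;
    uint32_t fraction;   // units of 1/2^32 of a second
};

// Convert e.g. 0.100 s to {0 seconds, fraction of roughly 0.1 * 2^32}.
static RtpsTime toTime(double secs)
{
    RtpsTime t;
    t.seconds = static_cast<int32_t>(secs);
    t.fraction = static_cast<uint32_t>((secs - t.seconds) * 4294967296.0);
    return t;
}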