// It is possible to get a rate update after idling and before starting anew.
// This can reduce the average rate during startup of the new channel.
void reservation::update_rate(size_t events, const microseconds& database)
{
    // Critical Section
    ///////////////////////////////////////////////////////////////////////////
    history_mutex_.lock();

    performance rate{ false, 0, 0, 0 };
    const auto end = now();
    const auto event_start = end - database;
    const auto start = end - rate_window();
    const auto history_count = history_.size();

    // Remove expired entries from the head of the queue.
    for (auto it = history_.begin(); it != history_.end() && it->time < start;
        it = history_.erase(it));

    const auto window_full = history_count > history_.size();
    const auto event_cost = static_cast<uint64_t>(database.count());
    history_.push_back({ events, event_cost, event_start });

    // We can't set the rate until we have a period (two or more data points).
    if (history_.size() < minimum_history)
    {
        history_mutex_.unlock();
        //---------------------------------------------------------------------
        return;
    }

    // Summarize event count and database cost.
    for (const auto& record: history_)
    {
        BITCOIN_ASSERT(rate.events <= max_size_t - record.events);
        rate.events += record.events;
        BITCOIN_ASSERT(rate.database <= max_uint64 - record.database);
        rate.database += record.database;
    }

    // Calculate the duration of the rate window.
    auto window = window_full ? rate_window() : (end - history_.front().time);
    auto count = duration_cast<microseconds>(window).count();
    rate.window = static_cast<uint64_t>(count);

    history_mutex_.unlock();
    ///////////////////////////////////////////////////////////////////////////

    ////LOG_DEBUG(LOG_NODE)
    ////    << "Records (" << slot() << ") "
    ////    << " size: " << rate.events
    ////    << " time: " << divide<double>(rate.window, micro_per_second)
    ////    << " cost: " << divide<double>(rate.database, micro_per_second)
    ////    << " full: " << (full ? "true" : "false");

    // Update the rate cache.
    set_rate(std::move(rate));
}
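
To make the sliding-window bookkeeping above easier to follow, here is a minimal standalone sketch of the same prune-then-summarize pattern. The record struct and the rate_window and minimum_history values below are hypothetical stand-ins for the reservation class members, not the actual definitions.

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>

using namespace std::chrono;

// Hypothetical stand-ins for the reservation members used above.
struct record
{
    std::size_t events;
    std::uint64_t database;
    steady_clock::time_point time;
};

constexpr auto rate_window = seconds(5);
constexpr std::size_t minimum_history = 2;

int main()
{
    std::deque<record> history;
    const auto end = steady_clock::now();

    // Seed the window with one expired entry and two live ones.
    history.push_back({ 10, 100, end - seconds(7) });
    history.push_back({ 20, 200, end - seconds(3) });
    history.push_back({ 30, 300, end - seconds(1) });

    // Remove expired entries from the head of the queue, as update_rate does.
    const auto start = end - rate_window;
    while (!history.empty() && history.front().time < start)
        history.pop_front();

    // Two or more data points are required to define a period.
    if (history.size() < minimum_history)
        return 0;

    // Summarize event count and database cost across the live window.
    std::uint64_t events = 0;
    std::uint64_t database = 0;
    for (const auto& entry : history)
    {
        events += entry.events;
        database += entry.database;
    }

    std::cout << "events: " << events << ", cost: " << database << std::endl;
    return 0;
}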
Example #2
void SendQueue::run() {
    if (_state == State::Stopped) {
        // we've already been asked to stop before we even got a chance to start
        // don't start now
#ifdef UDT_CONNECTION_DEBUG
        qDebug() << "SendQueue asked to run after being told to stop. Will not run.";
#endif
        return;
    } else if (_state == State::Running) {
#ifdef UDT_CONNECTION_DEBUG
        qDebug() << "SendQueue asked to run but is already running (according to state). Will not re-run.";
#endif
        return;
    }
    
    _state = State::Running;
    
    // Wait for handshake to be complete
    while (_state == State::Running && !_hasReceivedHandshakeACK) {
        sendHandshake();

        // Keep processing events
        QCoreApplication::sendPostedEvents(this);
        
        // Once we're here we've either received the handshake ACK or it's going to be time to re-send a handshake.
        // Either way let's continue processing - no packets will be sent if no handshake ACK has been received.
    }

    // Keep a high-resolution timestamp of when the next packet should be sent
    auto nextPacketTimestamp = p_high_resolution_clock::now();

    while (_state == State::Running) {
        bool attemptedToSendPacket = maybeResendPacket();
        
        // if we didn't find a packet to re-send AND we think we can fit a new packet on the wire
        // (this is according to the current flow window size) then we send out a new packet
        auto newPacketCount = 0;
        if (!attemptedToSendPacket) {
            newPacketCount = maybeSendNewPacket();
            attemptedToSendPacket = (newPacketCount > 0);
        }
        
        // since we're in a while loop, give the thread a chance to process events
        QCoreApplication::sendPostedEvents(this);
        
        // we just processed events, so check now whether we were told to stop
        // or whether the send queue has gone inactive; in either case we bail
        // out here, otherwise we keep going
        if (_state != State::Running || isInactive(attemptedToSendPacket)) {
            return;
        }

        // push the next packet timestamp forwards by the current packet send period
        auto nextPacketDelta = (newPacketCount == 2 ? 2 : 1) * _packetSendPeriod;
        nextPacketTimestamp += std::chrono::microseconds(nextPacketDelta);

        // sleep as long as we need for next packet send, if we can
        auto now = p_high_resolution_clock::now();

        auto timeToSleep = duration_cast<microseconds>(nextPacketTimestamp - now);

        // we use nextPacketTimestamp so that we don't fall behind, not to force long sleeps
        // we'll never allow nextPacketTimestamp to force us to sleep for more than nextPacketDelta
        // so cap it to that value
        if (timeToSleep > std::chrono::microseconds(nextPacketDelta)) {
            // reset the nextPacketTimestamp so that it is correct next time we come around
            nextPacketTimestamp = now + std::chrono::microseconds(nextPacketDelta);

            timeToSleep = std::chrono::microseconds(nextPacketDelta);
        }

        // we're seeing SendQueues sleep for a long period of time here,
        // which can lock the NodeList if it's attempting to clear connections
        // for now we guard this by capping the time this thread will sleep for

        const microseconds MAX_SEND_QUEUE_SLEEP_USECS { 2000000 };
        if (timeToSleep > MAX_SEND_QUEUE_SLEEP_USECS) {
            qWarning() << "udt::SendQueue wanted to sleep for" << timeToSleep.count() << "microseconds";
            qWarning() << "Capping sleep to" << MAX_SEND_QUEUE_SLEEP_USECS.count();
            qWarning() << "PSP:" << _packetSendPeriod << "NPD:" << nextPacketDelta
                << "NPT:" << nextPacketTimestamp.time_since_epoch().count()
                << "NOW:" << now.time_since_epoch().count();

            // alright, we're in a weird state
            // we want to know why this is happening so we can implement a better fix than this guard
            // send some details up to the API (if the user allows us) that indicate how we could produce such a large timeToSleep
            static const QString SEND_QUEUE_LONG_SLEEP_ACTION = "sendqueue-sleep";

            // setup a json object with the details we want
            QJsonObject longSleepObject;
            longSleepObject["timeToSleep"] = qint64(timeToSleep.count());
            longSleepObject["packetSendPeriod"] = _packetSendPeriod.load();
            longSleepObject["nextPacketDelta"] = nextPacketDelta;
            longSleepObject["nextPacketTimestamp"] = qint64(nextPacketTimestamp.time_since_epoch().count());
            longSleepObject["then"] = qint64(now.time_since_epoch().count());

            // hopefully send this event using the user activity logger
            UserActivityLogger::getInstance().logAction(SEND_QUEUE_LONG_SLEEP_ACTION, longSleepObject);

            timeToSleep = MAX_SEND_QUEUE_SLEEP_USECS;
        }

        std::this_thread::sleep_for(timeToSleep);
    }
}
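
The pacing pattern in the loop above (push a fixed timestamp forward by the send period, then clamp the resulting sleep) can be isolated into a short sketch. The period and cap values below are illustrative constants, not values taken from the library.

#include <chrono>
#include <iostream>
#include <thread>

using namespace std::chrono;

int main()
{
    // Illustrative values only; the real queue reads _packetSendPeriod at runtime.
    const microseconds packetSendPeriod { 10000 };   // 10 ms between packets
    const microseconds maxSleep { 2000000 };         // 2 s hard cap, as in the code above

    auto nextPacketTimestamp = steady_clock::now();

    for (int i = 0; i < 5; ++i)
    {
        // Advance the schedule by one send period (pretend one packet was sent).
        nextPacketTimestamp += packetSendPeriod;

        auto now = steady_clock::now();
        auto timeToSleep = duration_cast<microseconds>(nextPacketTimestamp - now);

        // Never let a stale timestamp force a sleep longer than one period.
        if (timeToSleep > packetSendPeriod)
        {
            nextPacketTimestamp = now + packetSendPeriod;
            timeToSleep = packetSendPeriod;
        }

        // Never exceed the hard cap either.
        if (timeToSleep > maxSleep)
            timeToSleep = maxSleep;

        if (timeToSleep > microseconds::zero())
            std::this_thread::sleep_for(timeToSleep);

        std::cout << "packet " << i << " sent on schedule" << std::endl;
    }

    return 0;
}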
Example #3
// Round the timestamp down to the start of its interval-aligned bucket.
time_point_sec get_file_start_time( const time_point_sec& timestamp, const microseconds& interval )
{
    int64_t interval_seconds = interval.to_seconds();
    int64_t file_number = timestamp.sec_since_epoch() / interval_seconds;
    return time_point_sec( (uint32_t)(file_number * interval_seconds) );
}
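
The integer divide-then-multiply rounds the timestamp down to the start of its interval bucket. Here is a plain-integer sketch of the same arithmetic, with time_point_sec replaced by raw epoch seconds for illustration.

#include <cstdint>
#include <iostream>

// Round an epoch timestamp down to the start of its interval bucket,
// mirroring the arithmetic in get_file_start_time above.
std::int64_t file_start_time(std::int64_t epoch_seconds, std::int64_t interval_seconds)
{
    return (epoch_seconds / interval_seconds) * interval_seconds;
}

int main()
{
    // 2021-01-01 12:34:56 UTC (1609504496) with a one-hour interval
    // falls into the bucket starting at 12:00:00 UTC (1609502400).
    std::cout << file_start_time(1609504496, 3600) << std::endl;
    return 0;
}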
Example #4
File: time.cpp Project: VicHao/fc
// Serialize a microseconds duration as its raw tick count.
void to_variant( const microseconds& input_microseconds, variant& output_variant )
{
    output_variant = input_microseconds.count();
}
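
To show what this conversion stores, here is a minimal round-trip analogue using only the standard library, with the variant replaced by a plain int64_t; the helper names to_count and from_count are hypothetical.

#include <chrono>
#include <cstdint>
#include <iostream>

// Serialize microseconds as a raw tick count, as to_variant does above.
std::int64_t to_count(std::chrono::microseconds value)
{
    return value.count();
}

// Rebuild the duration from the stored count (the inverse direction).
std::chrono::microseconds from_count(std::int64_t count)
{
    return std::chrono::microseconds(count);
}

int main()
{
    const auto original = std::chrono::microseconds(1500000);  // 1.5 seconds
    const auto stored = to_count(original);
    const auto restored = from_count(stored);

    std::cout << stored << " round-trips: " << (restored == original) << std::endl;
    return 0;
}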