Example #1
bool
shouldCloseLedger (
    bool anyTransactions,
    int previousProposers,
    int proposersClosed,
    int proposersValidated,
    std::chrono::milliseconds previousTime,
    std::chrono::milliseconds currentTime, // Time since last ledger's close time
    std::chrono::milliseconds openTime,    // Time waiting to close this ledger
    std::chrono::seconds idleInterval,
    beast::Journal j)
{
    using namespace std::chrono_literals;
    if ((previousTime < -1s) || (previousTime > 10min) ||
        (currentTime > 10min))
    {
        // These are unexpected cases; we just close the ledger
        JLOG (j.warn()) <<
            "shouldCloseLedger Trans=" << (anyTransactions ? "yes" : "no") <<
            " Prop: " << previousProposers << "/" << proposersClosed <<
            " Secs: " << currentTime.count() << " (last: " <<
            previousTime.count() << ")";
        return true;
    }

    if ((proposersClosed + proposersValidated) > (previousProposers / 2))
    {
        // If more than half of the network has closed, we close
        JLOG (j.trace()) << "Others have closed";
        return true;
    }

    if (!anyTransactions)
    {
        // Only close at the end of the idle interval
        return currentTime >= idleInterval; // normal idle
    }

    // Preserve minimum ledger open time
    if (openTime < LEDGER_MIN_CLOSE)
    {
        JLOG (j.debug()) <<
            "Must wait minimum time before closing";
        return false;
    }

    // Don't let this ledger close more than twice as fast as the previous
    // ledger reached consensus so that slower validators can slow down
    // the network
    if (openTime < (previousTime / 2))
    {
        JLOG (j.debug()) <<
            "Ledger has not been open long enough";
        return false;
    }

    // Close the ledger
    return true;
}
Example #2
    void stop () override
    {
        stop_async ();

        JLOG(m_journal.debug()) << "Waiting to stop";
        std::unique_lock<std::mutex> lk{m_mut};
        m_cv.wait(lk, [this]{return m_asyncHandlersCompleted;});
        lk.unlock();
        JLOG(m_journal.debug()) << "Stopped";
    }
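For context, stop() above blocks until m_asyncHandlersCompleted becomes true. A minimal sketch of the completion side it pairs with is shown below; the method name onHandlersCompleted is an assumption for illustration, and only m_mut, m_cv, and m_asyncHandlersCompleted come from the original.

    // Hypothetical sketch: called when the last outstanding async handler
    // finishes. Sets the flag under the same mutex, then wakes stop().
    void onHandlersCompleted ()
    {
        {
            std::lock_guard<std::mutex> lk{m_mut};
            m_asyncHandlersCompleted = true;
        }
        m_cv.notify_all();
    }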
Example #3
    /** Process a single ledger
        @param ledgerIndex The index of the ledger to process.
        @param ledgerHash  The known correct hash of the ledger.
        @param doNodes Ensure all ledger nodes are in the node db.
        @param doTxns Reprocess (account) transactions to SQL databases.
        @return `true` if the ledger was cleaned.
    */
    bool doLedger(
        LedgerIndex const& ledgerIndex,
        LedgerHash const& ledgerHash,
        bool doNodes,
        bool doTxns)
    {
        auto nodeLedger = app_.getInboundLedgers().acquire (
            ledgerHash, ledgerIndex, InboundLedger::fcGENERIC);
        if (!nodeLedger)
        {
            JLOG (j_.debug()) << "Ledger " << ledgerIndex << " not available";
            app_.getLedgerMaster().clearLedger (ledgerIndex);
            app_.getInboundLedgers().acquire(
                ledgerHash, ledgerIndex, InboundLedger::fcGENERIC);
            return false;
        }

        auto dbLedger = loadByIndex(ledgerIndex, app_);
        if (! dbLedger ||
            (dbLedger->info().hash != ledgerHash) ||
            (dbLedger->info().parentHash != nodeLedger->info().parentHash))
        {
            // Ideally we'd also check for more than one ledger with that index
            JLOG (j_.debug()) <<
                "Ledger " << ledgerIndex << " mismatches SQL DB";
            doTxns = true;
        }

        if(! app_.getLedgerMaster().fixIndex(ledgerIndex, ledgerHash))
        {
            JLOG (j_.debug()) << "ledger " << ledgerIndex
                            << " had wrong entry in history";
            doTxns = true;
        }

        if (doNodes && !nodeLedger->walkLedger(app_.journal ("Ledger")))
        {
            JLOG (j_.debug()) << "Ledger " << ledgerIndex << " is missing nodes";
            app_.getLedgerMaster().clearLedger (ledgerIndex);
            app_.getInboundLedgers().acquire(
                ledgerHash, ledgerIndex, InboundLedger::fcGENERIC);
            return false;
        }

        if (doTxns && !pendSaveValidated(app_, nodeLedger, true, false))
        {
            JLOG (j_.debug()) << "Failed to save ledger " << ledgerIndex;
            return false;
        }

        return true;
    }
Example #4
// Build a ledger from consensus transactions
std::shared_ptr<Ledger>
buildLedger(
    std::shared_ptr<Ledger const> const& parent,
    NetClock::time_point closeTime,
    const bool closeTimeCorrect,
    NetClock::duration closeResolution,
    SHAMap const& txs,
    Application& app,
    CanonicalTXSet& retriableTxs,
    beast::Journal j)
{
    JLOG(j.debug()) << "Report: TxSt = " << txs.getHash().as_uint256()
                    << ", close " << closeTime.time_since_epoch().count()
                    << (closeTimeCorrect ? "" : " (incorrect)");

    return buildLedgerImpl(
        parent,
        closeTime,
        closeTimeCorrect,
        closeResolution,
        app,
        j,
        [&](OpenView& accum, std::shared_ptr<Ledger> const& buildLCL) {
            retriableTxs = applyTransactions(app, txs, accum, buildLCL, j);
        });
}
Example #5
    void runImpl ()
    {
        beast::Thread::setCurrentThreadName ("LedgerCleaner");
        JLOG (j_.debug()) << "Started";

        init();

        while (true)
        {
            {
                std::unique_lock<std::mutex> lock (mutex_);
                wakeup_.wait(lock, [this]()
                    {
                        return (
                            shouldExit_ ||
                            state_ == State::startCleaning);
                    });
                if (shouldExit_)
                    break;

                state_ = State::cleaning;
            }
            doLedgerCleaner();
        }

        stopped();
    }
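The wait in runImpl() above needs a producer that flips state_ and signals wakeup_. A plausible sketch follows; the method name requestCleaning is an assumption, while mutex_, wakeup_, and State::startCleaning come from the original.

    // Hypothetical sketch: ask the cleaner thread to start a cleaning pass.
    void requestCleaning ()
    {
        {
            std::lock_guard<std::mutex> lock (mutex_);
            state_ = State::startCleaning;
        }
        wakeup_.notify_one();
    }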
Example #6
ApplyResult
applyTransaction (Application& app, OpenView& view,
    STTx const& txn,
        bool retryAssured, ApplyFlags flags,
            beast::Journal j)
{
    // The transaction need not be retried unless ApplyResult::Retry is returned.
    if (retryAssured)
        flags = flags | tapRETRY;

    JLOG (j.debug()) << "TXN "
        << txn.getTransactionID ()
        //<< (engine.view().open() ? " open" : " closed")
        // because of the optional in engine
        << (retryAssured ? "/retry" : "/final");

    try
    {
        auto const result = apply(app,
            view, txn, flags, j);
        if (result.second)
        {
            JLOG (j.debug())
                << "Transaction applied: " << transHuman (result.first);
            return ApplyResult::Success;
        }

        if (isTefFailure (result.first) || isTemMalformed (result.first) ||
            isTelLocal (result.first))
        {
            // failure
            JLOG (j.debug())
                << "Transaction failure: " << transHuman (result.first);
            return ApplyResult::Fail;
        }

        JLOG (j.debug())
            << "Transaction retry: " << transHuman (result.first);
        return ApplyResult::Retry;
    }
    catch (std::exception const&)
    {
        JLOG (j.warn()) << "Throws";
        return ApplyResult::Fail;
    }
}
Example #7
    void stop_async () override
    {
        if (m_stop_called.exchange (true) == false)
        {
            m_io_service.dispatch (m_strand.wrap (std::bind (
                &ResolverAsioImpl::do_stop,
                    this, CompletionCounter (this))));

            JLOG(m_journal.debug()) << "Queued a stop request";
        }
    }
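stop_async() only queues do_stop on the strand; the handler itself is not shown here. A rough sketch, under the assumption that it cancels queued work and the underlying resolver (m_stopped and m_resolver are assumed members, not taken from the original):

    // Hypothetical sketch: runs on the strand after stop_async() dispatches it.
    void do_stop (CompletionCounter)
    {
        assert (m_stop_called);

        if (m_stopped.exchange (true) == false)
        {
            m_work.clear ();        // drop jobs that have not started
            m_resolver.cancel ();   // abort any in-flight async resolution
        }
    }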
Example #8
ConsensusState
checkConsensus (
    int previousProposers,
    int currentProposers,
    int currentAgree,
    int currentFinished,
    std::chrono::milliseconds previousAgreeTime,
    std::chrono::milliseconds currentAgreeTime,
    bool proposing,
    beast::Journal j)
{
    JLOG (j.trace()) <<
        "checkConsensus: prop=" << currentProposers <<
        "/" << previousProposers <<
        " agree=" << currentAgree << " validated=" << currentFinished <<
        " time=" << currentAgreeTime.count() <<  "/" << previousAgreeTime.count();

    if (currentAgreeTime <= LEDGER_MIN_CONSENSUS)
        return ConsensusState::No;

    if (currentProposers < (previousProposers * 3 / 4))
    {
        // Less than 3/4 of the last ledger's proposers are present; don't
        // rush: we may need more time.
        if (currentAgreeTime < (previousAgreeTime + LEDGER_MIN_CONSENSUS))
        {
            JLOG (j.trace()) <<
                "too fast, not enough proposers";
            return ConsensusState::No;
        }
    }

    // Have we, together with the nodes on our UNL list, reached the threshold
    // to declare consensus?
    if (checkConsensusReached (currentAgree, currentProposers, proposing))
    {
        JLOG (j.debug()) << "normal consensus";
        return ConsensusState::Yes;
    }

    // Have sufficient nodes on our UNL list moved on and reached the threshold
    // to declare consensus?
    if (checkConsensusReached (currentFinished, currentProposers, false))
    {
        JLOG (j.warn()) <<
            "We see no consensus, but 80% of nodes have moved on";
        return ConsensusState::MovedOn;
    }

    // no consensus yet
    JLOG (j.trace()) << "no consensus";
    return ConsensusState::No;
}
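checkConsensusReached() is referenced above but not shown. A minimal sketch, assuming the simple percentage threshold implied by the "80% of nodes have moved on" message (the 80 constant and the countSelf handling are assumptions):

// Hypothetical sketch of the agreement threshold used by checkConsensus().
bool
checkConsensusReached (int agreeing, int total, bool countSelf)
{
    // Optionally include our own position alongside the other proposers.
    if (countSelf)
    {
        ++agreeing;
        ++total;
    }

    // With no proposers to compare against, treat consensus as reached.
    if (total == 0)
        return true;

    // Assumed threshold: more than 80% of proposers agree.
    return (agreeing * 100 / total) > 80;
}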
Example #9
std::shared_ptr<Ledger>
buildLedgerImpl(
    std::shared_ptr<Ledger const> const& parent,
    NetClock::time_point closeTime,
    const bool closeTimeCorrect,
    NetClock::duration closeResolution,
    Application& app,
    beast::Journal j,
    ApplyTxs&& applyTxs)
{
    auto buildLCL = std::make_shared<Ledger>(*parent, closeTime);

    if (buildLCL->rules().enabled(featureSHAMapV2) &&
        !buildLCL->stateMap().is_v2())
    {
        buildLCL->make_v2();
    }

    // Set up to write SHAMap changes to our database,
    //   perform updates, extract changes

    {
        OpenView accum(&*buildLCL);
        assert(!accum.open());
        applyTxs(accum, buildLCL);
        accum.apply(*buildLCL);
    }

    buildLCL->updateSkipList();

    {
        // Write the final version of all modified SHAMap
        // nodes to the node store to preserve the new LCL

        int const asf = buildLCL->stateMap().flushDirty(
            hotACCOUNT_NODE, buildLCL->info().seq);
        int const tmf = buildLCL->txMap().flushDirty(
            hotTRANSACTION_NODE, buildLCL->info().seq);
        JLOG(j.debug()) << "Flushed " << asf << " accounts and " << tmf
                        << " transaction nodes";
    }
    buildLCL->unshare();

    // Accept ledger
    buildLCL->setAccepted(
        closeTime, closeResolution, closeTimeCorrect, app.config());

    return buildLCL;
}
Example #10
    void sweep () override
    {
        clock_type::time_point const now (m_clock.now());

        // Make a list of things to sweep, while holding the lock
        std::vector <MapType::mapped_type> stuffToSweep;
        std::size_t total;
        {
            ScopedLockType sl (mLock);
            MapType::iterator it (mLedgers.begin ());
            total = mLedgers.size ();
            stuffToSweep.reserve (total);

            while (it != mLedgers.end ())
            {
                if (it->second->getLastAction () > now)
                {
                    it->second->touch ();
                    ++it;
                }
                else if ((it->second->getLastAction () +
                          std::chrono::minutes (1)) < now)
                {
                    stuffToSweep.push_back (it->second);
                    // shouldn't cause the actual final delete
                    // since we are holding a reference in the vector.
                    it = mLedgers.erase (it);
                }
                else
                {
                    ++it;
                }
            }

            beast::expire (mRecentFailures, kReacquireInterval);

        }

        JLOG (j_.debug()) <<
            "Swept " << stuffToSweep.size () <<
            " out of " << total << " inbound ledgers.";
    }
Example #11
    void do_resolve (std::vector <std::string> const& names,
        HandlerType const& handler, CompletionCounter)
    {
        assert (! names.empty());

        if (m_stop_called == false)
        {
            m_work.emplace_back (names, handler);

            JLOG(m_journal.debug()) <<
                "Queued new job with " << names.size() <<
                " tasks. " << m_work.size() << " jobs outstanding.";

            if (m_work.size() > 0)
            {
                m_io_service.post (m_strand.wrap (std::bind (
                    &ResolverAsioImpl::do_work, this,
                        CompletionCounter (this))));
            }
        }
    }
Example #12
// Build a ledger by replaying
std::shared_ptr<Ledger>
buildLedger(
    LedgerReplay const& replayData,
    ApplyFlags applyFlags,
    Application& app,
    beast::Journal j)
{
    auto const& replayLedger = replayData.replay();

    JLOG(j.debug()) << "Report: Replay Ledger " << replayLedger->info().hash;

    return buildLedgerImpl(
        replayData.parent(),
        replayLedger->info().closeTime,
        ((replayLedger->info().closeFlags & sLCF_NoConsensusTime) == 0),
        replayLedger->info().closeTimeResolution,
        app,
        j,
        [&](OpenView& accum, std::shared_ptr<Ledger> const& buildLCL) {
            for (auto& tx : replayData.orderedTxns())
                applyTransaction(app, accum, *tx.second, false, applyFlags, j);
        });
}
Example #13
void SHAMapNodeID::dump (beast::Journal journal) const
{
    JLOG(journal.debug()) <<
        getString ();
}
Example #14
CanonicalTXSet
applyTransactions(
    Application& app,
    SHAMap const& txns,
    OpenView& view,
    std::shared_ptr<Ledger> const& buildLCL,
    beast::Journal j)
{
    CanonicalTXSet retriableTxs(txns.getHash().as_uint256());

    for (auto const& item : txns)
    {
        if (buildLCL->txExists(item.key()))
            continue;

        // The transaction wasn't filtered
        // Add it to the set to be tried in canonical order
        JLOG(j.debug()) << "Processing candidate transaction: " << item.key();
        try
        {
            retriableTxs.insert(
                std::make_shared<STTx const>(SerialIter{item.slice()}));
        }
        catch (std::exception const&)
        {
            JLOG(j.warn()) << "Txn " << item.key() << " throws";
        }
    }

    bool certainRetry = true;
    // Attempt to apply all of the retriable transactions
    for (int pass = 0; pass < LEDGER_TOTAL_PASSES; ++pass)
    {
        JLOG(j.debug()) << "Pass: "******" Txns: " << retriableTxs.size()
                        << (certainRetry ? " retriable" : " final");
        int changes = 0;

        auto it = retriableTxs.begin();

        while (it != retriableTxs.end())
        {
            try
            {
                switch (applyTransaction(
                    app, view, *it->second, certainRetry, tapNONE, j))
                {
                    case ApplyResult::Success:
                        it = retriableTxs.erase(it);
                        ++changes;
                        break;

                    case ApplyResult::Fail:
                        it = retriableTxs.erase(it);
                        break;

                    case ApplyResult::Retry:
                        ++it;
                }
            }
            catch (std::exception const&)
            {
                JLOG(j.warn()) << "Transaction throws";
                it = retriableTxs.erase(it);
            }
        }

        JLOG(j.debug()) << "Pass: "******" finished " << changes
                        << " changes";

        // A non-retry pass made no changes
        if (!changes && !certainRetry)
            return retriableTxs;

        // Stop retriable passes
        if (!changes || (pass >= LEDGER_RETRY_PASSES))
            certainRetry = false;
    }

    // If there are any transactions left, we must have
    // tried them in at least one final pass
    assert(retriableTxs.empty() || !certainRetry);
    return retriableTxs;
}
Example #15
    /** Run the ledger cleaner. */
    void doLedgerCleaner()
    {
        auto shouldExit = [this]()
        {
            std::lock_guard<std::mutex> lock(mutex_);
            return shouldExit_;
        };

        std::shared_ptr<ReadView const> goodLedger;

        while (! shouldExit())
        {
            LedgerIndex ledgerIndex;
            LedgerHash ledgerHash;
            bool doNodes;
            bool doTxns;

            while (app_.getFeeTrack().isLoadedLocal())
            {
                JLOG (j_.debug()) << "Waiting for load to subside";
                std::this_thread::sleep_for(std::chrono::seconds(5));
                if (shouldExit())
                    return;
            }

            {
                std::lock_guard<std::mutex> lock (mutex_);
                if ((minRange_ > maxRange_) ||
                    (maxRange_ == 0) || (minRange_ == 0))
                {
                    minRange_ = maxRange_ = 0;
                    state_ = State::readyToClean;
                    return;
                }
                ledgerIndex = maxRange_;
                doNodes = checkNodes_;
                doTxns = fixTxns_;
            }

            ledgerHash = getHash(ledgerIndex, goodLedger);

            bool fail = false;
            if (ledgerHash.isZero())
            {
                JLOG (j_.info()) << "Unable to get hash for ledger "
                               << ledgerIndex;
                fail = true;
            }
            else if (!doLedger(ledgerIndex, ledgerHash, doNodes, doTxns))
            {
                JLOG (j_.info()) << "Failed to process ledger " << ledgerIndex;
                fail = true;
            }

            if (fail)
            {
                {
                    std::lock_guard<std::mutex> lock (mutex_);
                    ++failures_;
                }
                // Wait for acquiring to catch up to us
                std::this_thread::sleep_for(std::chrono::seconds(2));
            }
            else
            {
                {
                    std::lock_guard<std::mutex> lock (mutex_);
                    if (ledgerIndex == minRange_)
                        ++minRange_;
                    if (ledgerIndex == maxRange_)
                        --maxRange_;
                    failures_ = 0;
                }
                // Reduce I/O pressure and wait for acquiring to catch up to us
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
            }

        }
    }
Example #16
    void init ()
    {
        JLOG (j_.debug()) << "Initializing";
    }