// Clear the failure counter and make this peer eligible for reconnection.
// Preferred peers become eligible immediately (default-constructed, i.e.
// epoch, time_point); all other peers become eligible as of the current
// virtual-clock time.
void PeerRecord::resetBackOff(VirtualClock& clock)
{
    mNumFailures = 0;
    if (mIsPreferred)
    {
        mNextAttempt = VirtualClock::time_point();
    }
    else
    {
        mNextAttempt = clock.now();
    }
    CLOG(DEBUG, "Overlay") << "PeerRecord: " << toString() << " backoff reset";
}
// Compute, record, and return the next reconnection back-off delay.
//
// The delay is drawn uniformly at random from
// [1, 2^backoffCount * SECONDS_PER_BACKOFF] seconds, where backoffCount is
// the failure count capped at MAX_BACKOFF_EXPONENT; randomization spreads
// retries out so many peers failing at once do not all reconnect together.
// Side effect: advances mNextAttempt to clock.now() + delay.
std::chrono::seconds PeerRecord::computeBackoff(VirtualClock& clock)
{
    int32 backoffCount = std::min<int32>(MAX_BACKOFF_EXPONENT, mNumFailures);
    // Use an integer shift for the power of two instead of std::pow, which
    // computes in floating point (inexact rounding is possible on some
    // platforms, and the transcendental call is needless for 2^k).
    // Assumes MAX_BACKOFF_EXPONENT < 31 so the shift cannot overflow int —
    // TODO confirm against the constant's definition.
    // NOTE(review): std::rand()'s modulo has a slight bias and uses global
    // state; kept to match the file's existing RNG convention (seeded in
    // ApplicationImpl via std::srand).
    int windowSecs = (1 << backoffCount) * SECONDS_PER_BACKOFF;
    auto nsecs = std::chrono::seconds(std::rand() % windowSecs + 1);
    mNextAttempt = clock.now() + nsecs;
    return nsecs;
}
// NOTE(review): fragment of a larger test — the setup of cfg1/cfg3 precedes
// this view and the final REQUIRE expression continues past it.
// Allow app2 to accept both inbound connections (app1 and app3).
cfg2.MAX_ADDITIONAL_PEER_CONNECTIONS = 3;
auto app1 = createTestApplication(clock, cfg1);
auto app2 = createTestApplication(clock, cfg2);
auto app3 = createTestApplication(clock, cfg3);
// Both app1 and app3 connect inbound to app2 over loopback pairs.
LoopbackPeerConnection conn(*app1, *app2);
LoopbackPeerConnection conn2(*app3, *app2);
testutil::crankSome(clock);
app2->getOverlayManager().start();
// app1 and app3 are both connected to app2. app1 will hammer on the
// connection, app3 will do nothing. app2 should disconnect app1.
// but app3 should remain connected since the i/o timeout is 30s.
auto start = clock.now();
auto end = start + std::chrono::seconds(10);
VirtualTimer timer(clock);
// Repeatedly floods SEND_PEERS messages over conn until `end` is reached.
testutil::injectSendPeersAndReschedule(end, clock, timer, conn);
// Crank the virtual clock until the deadline, out of events, or a safety
// cap of 1000 iterations — whichever comes first.
for (size_t i = 0;
     (i < 1000 && clock.now() < end && clock.crank(false) > 0); ++i)
    ;
// The hammering connection (app1<->app2) must have been dropped on both
// ends, while the quiet connection (app3<->app2) stays up.
REQUIRE(!conn.getInitiator()->isConnected());
REQUIRE(!conn.getAcceptor()->isConnected());
REQUIRE(conn2.getInitiator()->isConnected());
REQUIRE(conn2.getAcceptor()->isConnected());
// The drop must be attributed to load shedding in app2's metrics
// (expression continues beyond this fragment).
REQUIRE(app2->getMetrics()
            .NewMeter({"overlay", "drop", "load-shed"}, "drop")
// Construct the application: wire up the virtual clock, config, worker
// thread pool, shutdown signal handlers, metrics, and then each subsystem
// in dependency order.  Initialization order is significant — see the
// comment before the subsystem constructions below.
ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg)
    : mVirtualClock(clock)
    , mConfig(cfg)
    // Concurrency hint for the worker io_service matches the hardware
    // thread count; the same count is used below to spawn worker threads.
    // NOTE(review): hardware_concurrency() may return 0, in which case no
    // worker threads are spawned below — confirm this is handled upstream.
    , mWorkerIOService(std::thread::hardware_concurrency())
    // Keeps the worker io_service's run() loops alive even when no work is
    // queued; released at shutdown so the worker threads can exit.
    , mWork(make_unique<asio::io_service::work>(mWorkerIOService))
    , mWorkerThreads()
    // Signal handling runs on the main (virtual-clock) io_service.
    , mStopSignals(clock.getIOService(), SIGINT)
    , mStopping(false)
    , mStoppingTimer(*this)
    , mMetrics(make_unique<medida::MetricsRegistry>())
    , mAppStateCurrent(mMetrics->NewCounter({"app", "state", "current"}))
    , mAppStateChanges(mMetrics->NewTimer({"app", "state", "changes"}))
    , mLastStateChange(clock.now())
{
    // SIGQUIT/SIGTERM are not defined on all platforms (e.g. Windows), so
    // they are added conditionally.
#ifdef SIGQUIT
    mStopSignals.add(SIGQUIT);
#endif
#ifdef SIGTERM
    mStopSignals.add(SIGTERM);
#endif

    // Seed the global C RNG from the (possibly virtual) clock; consumers
    // elsewhere (e.g. PeerRecord back-off) rely on std::rand().
    std::srand(static_cast<uint32>(clock.now().time_since_epoch().count()));

    mNetworkID = sha256(mConfig.NETWORK_PASSPHRASE);

    unsigned t = std::thread::hardware_concurrency();
    LOG(DEBUG) << "Application constructing "
               << "(worker threads: " << t << ")";

    // Any caught stop signal triggers a graceful shutdown of the app.
    mStopSignals.async_wait([this](asio::error_code const& ec, int sig) {
        if (!ec)
        {
            LOG(INFO) << "got signal " << sig << ", shutting down";
            this->gracefulStop();
        }
    });

    // These must be constructed _after_ because they frequently call back
    // into App.getFoo() to get information / start up.
    mDatabase = make_unique<Database>(*this);
    mPersistentState = make_unique<PersistentState>(*this);
    mTmpDirManager = make_unique<TmpDirManager>(cfg.TMP_DIR_PATH);
    mOverlayManager = OverlayManager::create(*this);
    mLedgerManager = LedgerManager::create(*this);
    mHerder = Herder::create(*this);
    mBucketManager = BucketManager::create(*this);
    mHistoryManager = HistoryManager::create(*this);
    mProcessManager = ProcessManager::create(*this);
    mCommandHandler = make_unique<CommandHandler>(*this);
    mWorkManager = WorkManager::create(*this);

    // Spawn one worker per hardware thread; each captures its index `t`
    // by value before the decrement is observed by the next iteration.
    while (t--)
    {
        mWorkerThreads.emplace_back([this, t]() { this->runWorkerThread(t); });
    }

    LOG(DEBUG) << "Application constructed";
}