void NetworkInterfaceMock::incrementNow(Milliseconds inc) {
    boost::lock_guard<boost::mutex> lk(_mutex);
    invariant(inc.total_milliseconds() > 0);
    _now += inc.total_milliseconds();
    _executor->signalWorkForTest();
    _timeElapsed.notify_all();
}
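For readers without the surrounding test fixture, here is a minimal, self-contained sketch of the same pattern (advance a fake clock under a mutex and wake any waiters) using only the standard library; MockClock is a hypothetical stand-in, not the original NetworkInterfaceMock class.
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <mutex>

class MockClock {
public:
    void incrementNow(std::chrono::milliseconds inc) {
        std::lock_guard<std::mutex> lk(_mutex);
        assert(inc.count() > 0);        // only ever move the fake clock forward
        _now += inc;
        _timeElapsed.notify_all();      // wake anyone waiting for time to pass
    }

    std::chrono::milliseconds now() const {
        std::lock_guard<std::mutex> lk(_mutex);
        return _now;
    }

private:
    mutable std::mutex _mutex;
    std::condition_variable _timeElapsed;
    std::chrono::milliseconds _now{0};
};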
Example #2
Milliseconds randtime(Milliseconds const& min, Milliseconds const& max)
{
    // The span must be non-negative and fit in 32 bits, since urand() takes uint32 bounds.
    long long diff = max.count() - min.count();
    ASSERT(diff >= 0);
    ASSERT(diff <= (uint32)-1);
    return min + Milliseconds(urand(0, diff));
}
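A hedged, standard-library-only sketch of the same idea follows; urand() above is assumed to be a project-specific helper returning a uniform 32-bit value, so this version substitutes <random> and std::chrono.
#include <cassert>
#include <chrono>
#include <random>

std::chrono::milliseconds randTime(std::chrono::milliseconds min,
                                   std::chrono::milliseconds max) {
    assert(max >= min);
    static thread_local std::mt19937_64 gen{std::random_device{}()};
    std::uniform_int_distribution<long long> dist(0, (max - min).count());
    return min + std::chrono::milliseconds(dist(gen));
}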
Example #3
    Status LegacyReplicationCoordinator::_stepDownHelper(OperationContext* txn,
                                                         bool force,
                                                         const Milliseconds& initialWaitTime,
                                                         const Milliseconds& stepdownTime,
                                                         const Milliseconds& postStepdownWaitTime) {
        invariant(getReplicationMode() == modeReplSet);
        if (!getCurrentMemberState().primary()) {
            return Status(ErrorCodes::NotMaster, "not primary so can't step down");
        }

        if (!force) {
            Status status = _waitForSecondary(initialWaitTime, Milliseconds(10 * 1000));
            if (!status.isOK()) {
                return status;
            }
        }

        // step down
        bool worked = repl::theReplSet->stepDown(txn, stepdownTime.total_seconds());
        if (!worked) {
            return Status(ErrorCodes::NotMaster, "not primary so can't step down");
        }

        if (postStepdownWaitTime.total_milliseconds() > 0) {
            log() << "waiting for secondaries to catch up" << endl;

            // The only caller of this with a non-zero postStepdownWaitTime is
            // stepDownAndWaitForSecondary, and the only caller of that is the shutdown command
            // which doesn't actually care if secondaries failed to catch up here, so we ignore the
            // return status of _waitForSecondary
            _waitForSecondary(postStepdownWaitTime, Milliseconds(0));
        }
        return Status::OK();
    }
Example #4
Date_t roundTime(Date_t now, Milliseconds period) {
    // Note: auto type deduction is explicitly avoided here to ensure rigid type correctness
    long long clock_duration = now.toMillisSinceEpoch();

    long long now_next_period = clock_duration + period.count();

    long long excess_time(now_next_period % period.count());

    long long next_time = now_next_period - excess_time;

    return Date_t::fromMillisSinceEpoch(next_time);
}
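The helper above rounds a timestamp forward to the next multiple of period. The same arithmetic, as a small sketch over plain std::chrono values (assuming period > 0):
#include <chrono>

std::chrono::milliseconds roundUpToPeriod(std::chrono::milliseconds now,
                                          std::chrono::milliseconds period) {
    const long long next = now.count() + period.count();    // step into the next period
    const long long excess = next % period.count();          // distance past the boundary
    return std::chrono::milliseconds(next - excess);          // snap back to the boundary
}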
Example #5
void NetworkInterfaceASIO::waitForWorkUntil(Date_t when) {
    stdx::unique_lock<stdx::mutex> lk(_executorMutex);
    // TODO: This can be restructured with a lambda.
    while (!_isExecutorRunnable) {
        const Milliseconds waitTime(when - now());
        if (waitTime <= Milliseconds(0)) {
            break;
        }
        _isExecutorRunnableCondition.wait_for(lk, waitTime.toSystemDuration());
    }
    _isExecutorRunnable = false;
}
Example #6
LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) {
    stdx::unique_lock<stdx::mutex> lock(_mutex);
    return _cond.wait_for(
               lock, timeout.toSystemDuration(), [this] { return _result != LOCK_INVALID; })
        ? _result
        : LOCK_TIMEOUT;
}
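A self-contained sketch of the same "wait for a result or time out" idiom with std::condition_variable::wait_for and a predicate; Result and the member names here are illustrative, not the original LockResult/CondVarLockGrantNotification types.
#include <chrono>
#include <condition_variable>
#include <mutex>

enum class Result { kInvalid, kGranted, kTimeout };

class Notification {
public:
    Result wait(std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lock(_mutex);
        // wait_for returns true only if the predicate became true before the deadline.
        return _cond.wait_for(lock, timeout, [this] { return _result != Result::kInvalid; })
            ? _result
            : Result::kTimeout;
    }

    void notify(Result r) {
        {
            std::lock_guard<std::mutex> lock(_mutex);
            _result = r;
        }
        _cond.notify_all();
    }

private:
    std::mutex _mutex;
    std::condition_variable _cond;
    Result _result = Result::kInvalid;
};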
Example #7
bool Timer::update(const Milliseconds& now)
{
    if(_active == false)
        return false;

    bool hasElapsed = now >= _finished;

    Timer::Sentry sentry(_sentry);

    DateTime ts;

    while( _active && now >= _finished )
    {
        Milliseconds currentTs = _finished;

        // We advance to the next interval before sending the signal because
        // sending might throw; if we updated the time only after a successful
        // send, a throwing send would skip the update and we could loop forever.
        _finished += _interval;

        if( ! sentry )
            return hasElapsed;

        timeout.send();

        // We only send the DateTime-carrying signal when someone is connected,
        // since building a DateTime object from milliseconds takes some time.
        if (timeoutts.connectionCount() > 0)
        {
            struct tm tim;
            time_t sec = static_cast<time_t>(currentTs.totalSeconds());
            localtime_r(&sec, &tim);
            DateTime dueTime(tim.tm_year + 1900, tim.tm_mon + 1, tim.tm_mday,
                 tim.tm_hour, tim.tm_min, tim.tm_sec,
                 0, currentTs.totalUSecs() % 1000000);
            timeoutts.send(dueTime);
        }

        if (_once)
            stop();
    }

    return hasElapsed;
}
Example #8
DistLockCatalogImpl::DistLockCatalogImpl(RemoteCommandTargeter* targeter,
                                         ShardRegistry* shardRegistry,
                                         Milliseconds writeConcernTimeout)
    : _client(shardRegistry),
      _targeter(targeter),
      _writeConcern(WriteConcernOptions(WriteConcernOptions::kMajority,
                                        WriteConcernOptions::JOURNAL,
                                        writeConcernTimeout.count())),
      _lockPingNS(LockpingsType::ConfigNS),
      _locksNS(LocksType::ConfigNS) {}
Example #9
void IgmpSender::threadFunction()
{
    Milliseconds sleepTime(SLEEP_INTERVAL);
    while (!_stopThread) {
        Milliseconds before;             // timestamp captured just before the send

        _igmpPacket->send();

        Milliseconds after;              // timestamp captured just after the send

        after = after - before;          // time spent sending the packet

        int slept = after.getTime();     // count the send time toward this interval
        while (!_stopThread && slept < _interval * 1000) {
            sleepTime.sleep();
            slept += SLEEP_INTERVAL;
        }
    }
}
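The loop above sleeps in small SLEEP_INTERVAL slices so the thread can notice _stopThread promptly. A stand-alone sketch of that idiom, with illustrative names, might look like this:
#include <atomic>
#include <chrono>
#include <thread>

void periodicSend(std::atomic<bool>& stop,
                  std::chrono::milliseconds interval,
                  std::chrono::milliseconds slice) {
    while (!stop) {
        // ... send the packet here ...
        std::chrono::milliseconds slept{0};
        while (!stop && slept < interval) {
            std::this_thread::sleep_for(slice);   // short naps keep shutdown latency low
            slept += slice;
        }
    }
}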
Example #10
DistLockCatalogImpl::DistLockCatalogImpl(RemoteCommandTargeter* targeter,
                                         RemoteCommandRunner* executor,
                                         Milliseconds writeConcernTimeout):
    _cmdRunner(executor),
    _targeter(targeter),
    _writeConcern(WriteConcernOptions(WriteConcernOptions::kMajority,
                                      WriteConcernOptions::JOURNAL,
                                      writeConcernTimeout.count())),
    _lockPingNS(LockpingsType::ConfigNS),
    _locksNS(LocksType::ConfigNS) {
}
Example #11
void TimerBase::threadFunction()
{
    Milliseconds begin;
    Milliseconds end(0), diff(0);

    _active = true;
    Interval aux;

    while (_run) {

        aux = _interval - (end - begin);
        end.setTimestamp();
        if (!_run) break;
        Milliseconds(aux).sleep(); // sleep in 5 ms steps

        begin.setTimestamp();

        if (!_run) break;
        timeout();
        if (_singleShot) break;
    }
    _active = false;
    _run = false;
}
Example #12
Status ServiceExecutorReserved::shutdown(Milliseconds timeout) {
    LOG(3) << "Shutting down reserved executor";

    stdx::unique_lock<stdx::mutex> lock(_mutex);
    _stillRunning.store(false);
    _threadWakeup.notify_all();

    bool result = _shutdownCondition.wait_for(lock, timeout.toSystemDuration(), [this]() {
        return _numRunningWorkerThreads.load() == 0;
    });

    return result
        ? Status::OK()
        : Status(ErrorCodes::Error::ExceededTimeLimit,
                 "reserved executor couldn't shutdown all worker threads within time limit.");
}
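A compact sketch of the same shutdown shape (stop accepting work, wake the workers, then wait up to a deadline for them to drain); WorkerPool and its members are illustrative stand-ins, not the ServiceExecutorReserved internals.
#include <chrono>
#include <condition_variable>
#include <mutex>

class WorkerPool {
public:
    // Returns true if every worker exited before the deadline.
    bool shutdown(std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lock(_mutex);
        _running = false;                 // workers poll this flag (not shown)
        _wakeup.notify_all();             // unblock idle workers so they can exit
        return _drained.wait_for(lock, timeout,
                                 [this] { return _numWorkers == 0; });
    }

    // Called by each worker thread on its way out.
    void workerExited() {
        std::lock_guard<std::mutex> lock(_mutex);
        --_numWorkers;
        _drained.notify_all();
    }

private:
    std::mutex _mutex;
    std::condition_variable _wakeup;
    std::condition_variable _drained;
    bool _running = true;
    int _numWorkers = 0;
};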
Example #13
      //-----------------------------------------------------------------------
      HTTP::HTTPQuery::HTTPQuery(
                                 const make_private &,
                                 HTTPPtr outer,
                                 IHTTPQueryDelegatePtr delegate,
                                 bool isPost,
                                 const char *userAgent,
                                 const char *url,
                                 const BYTE *postData,
                                 size_t postDataLengthInBytes,
                                 const char *postDataMimeType,
                                 Milliseconds timeout
                                 ) :
        SharedRecursiveLock(outer ? *outer : SharedRecursiveLock::create()),
        MessageQueueAssociator(IHelper::getServiceQueue()),
        mOuter(outer),
        mDelegate(IHTTPQueryDelegateProxy::create(Helper::getServiceQueue(), delegate)),
        mIsPost(isPost),
        mUserAgent(userAgent),
        mURL(url),
        mMimeType(postDataMimeType),
        mTimeout(timeout),
        mStatusCode(HttpStatusCode::None)
      {
        ZS_LOG_DEBUG(log("created"))

        if (0 != postDataLengthInBytes) {
          mPostData.CleanNew(postDataLengthInBytes);
          memcpy(mPostData.BytePtr(), postData, postDataLengthInBytes);
        }

        ZS_EVENTING_8(
                      x, i, Debug, ServicesHttpQueryCreate, os, Http, Start,
                      puid, id, mID,
                      bool, isPost, mIsPost,
                      string, userAgent, mUserAgent,
                      string, url, mURL,
                      buffer, postData, postData,
                      size, postSize, postDataLengthInBytes,
                      string, postDataMimeType, postDataMimeType,
                      duration, timeout, timeout.count()
                      );
      }
Example #14
void Timer::start(const DateTime& startTime, const Milliseconds& interval)
{
    if (interval <= Timespan(0))
        throw std::logic_error("cannot run interval timer without interval");

    if (_active)
        stop();
    
    _active = true;
    _interval = interval;
    _once = false;

    Timespan systemTime = Clock::getSystemTicks();
    struct tm tim;
    time_t sec = static_cast<time_t>(systemTime.totalSeconds());
    localtime_r(&sec, &tim);
    DateTime now(tim.tm_year + 1900, tim.tm_mon + 1, tim.tm_mday,
                 tim.tm_hour, tim.tm_min, tim.tm_sec,
                 0, systemTime.totalUSecs() % 1000000);
    if (startTime > now)
    {
        _finished = systemTime + (startTime - now);
    }
    else
    {
        // startTime <= now
        Timespan elapsed = now - startTime;
        uint64_t ticksElapsed = elapsed.totalMSecs() / interval.totalMSecs();
        DateTime tickTime = startTime + (ticksElapsed + 1) * Timespan(interval);
        Timespan delay = tickTime - now;
        _finished = systemTime + delay;
    }

    if (_selector)
        _selector->onTimerChanged(*this);
}
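The scheduling arithmetic in the else branch (find the first tick boundary strictly after now when the start time lies in the past) can be sketched with std::chrono alone:
#include <chrono>

std::chrono::steady_clock::time_point nextTick(std::chrono::steady_clock::time_point start,
                                               std::chrono::steady_clock::time_point now,
                                               std::chrono::milliseconds interval) {
    if (start > now)
        return start;                                  // first tick has not come due yet
    const auto elapsed = now - start;
    const auto ticksElapsed = elapsed / interval;      // whole intervals already passed
    return start + (ticksElapsed + 1) * interval;      // first boundary strictly after now
}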
Example #15
	inline void Formatter(FormatData& formatData, const Milliseconds& milliseconds)
	{
		Formatter(formatData, milliseconds.count());

		formatData.string.append(U"ms", 2);
	}
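A stand-alone equivalent for readers without the surrounding Formatter machinery: print the raw tick count followed by a "ms" suffix.
#include <chrono>
#include <string>

std::string formatMilliseconds(std::chrono::milliseconds ms) {
    return std::to_string(ms.count()) + "ms";
}

// Usage: formatMilliseconds(std::chrono::milliseconds(1500)) yields "1500ms".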
Example #16
ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
    const HostAndPort& target, Date_t now, Milliseconds timeout) {
    stdx::unique_lock<stdx::mutex> lk(_mutex);

    // Clean up connections on stale/unused hosts
    _cleanUpStaleHosts_inlock(now);

    for (HostConnectionMap::iterator hostConns;
         (hostConns = _connections.find(target)) != _connections.end();) {
        // Clean up the requested host to remove stale/unused connections
        _cleanUpOlderThan_inlock(now, &hostConns->second);

        if (hostConns->second.empty()) {
            // prevent host from causing unnecessary cleanups
            _lastUsedHosts[hostConns->first] = kNeverTooStale;
            break;
        }

        _inUseConnections.splice(
            _inUseConnections.begin(), hostConns->second, hostConns->second.begin());

        const ConnectionList::iterator candidate = _inUseConnections.begin();
        lk.unlock();

        try {
            if (candidate->conn->isStillConnected()) {
                // setSoTimeout takes a double representing the number of seconds for send and
                // receive timeouts.  Thus, we must take count() and divide by
                // 1000.0 to get the number of seconds with a fractional part.
                candidate->conn->setSoTimeout(timeout.count() / 1000.0);
                return candidate;
            }
        } catch (...) {
            lk.lock();
            _destroyConnection_inlock(&_inUseConnections, candidate);
            throw;
        }

        lk.lock();
        _destroyConnection_inlock(&_inUseConnections, candidate);
    }

    // No idle connection in the pool; make a new one.
    lk.unlock();

    std::unique_ptr<DBClientConnection> conn(new DBClientConnection());

    // setSoTimeout takes a double representing the number of seconds for send and receive
    // timeouts.  Thus, we must take count() and divide by 1000.0 to get the number
    // of seconds with a fractional part.
    conn->setSoTimeout(timeout.count() / 1000.0);

    if (_hook) {
        uassertStatusOK(
            conn->connect(target,
                          [this, &target](const executor::RemoteCommandResponse& isMasterReply) {
                              return _hook->validateHost(target, isMasterReply);
                          }));

        auto postConnectRequest = uassertStatusOK(_hook->makeRequest(target));

        // We might not have a postConnectRequest
        if (postConnectRequest != boost::none) {
            auto start = Date_t::now();
            auto reply =
                conn->runCommandWithMetadata(postConnectRequest->dbname,
                                             postConnectRequest->cmdObj.firstElementFieldName(),
                                             postConnectRequest->metadata,
                                             postConnectRequest->cmdObj);

            auto rcr = executor::RemoteCommandResponse(reply->getCommandReply().getOwned(),
                                                       reply->getMetadata().getOwned(),
                                                       Date_t::now() - start);

            uassertStatusOK(_hook->handleReply(target, std::move(rcr)));
        }
    } else {
        uassertStatusOK(conn->connect(target));
    }

    conn->port().tag |= _messagingPortTags;

    if (getGlobalAuthorizationManager()->isAuthEnabled()) {
        uassert(ErrorCodes::AuthenticationFailed,
                "Missing credentials for authenticating as internal user",
                isInternalAuthSet());
        conn->auth(getInternalUserAuthParamsWithFallback());
    }

    lk.lock();
    return _inUseConnections.insert(_inUseConnections.begin(), ConnectionInfo(conn.release(), now));
}
Example #17
int main()
{
	{
		Months months(23);
		cout << months.getValue() << " months" << endl;
		Days days = months;
		cout << days.getValue() << " days" << endl;
		Hours hrs(months);
		cout << hrs.getValue() << " hrs." << endl;
		Minutes m(hrs);
		cout << m.getValue() << " min." << endl;
		Seconds s(m);
		cout << s.getValue() << " sec." << endl;
		Milliseconds ms;
		ms = s;
		cout << ms.getValue() << " ms" << endl;
	}

	cout << "---------------------\n";

	{
		Minutes min(10);
		cout << min.getValue() << " min. " << endl;
		Seconds sec = min;
		cout << sec.getValue() << " sec. " << endl;
	}

	cout << "---------------------\n";

	{
		Miles miles(24859); // circumference of the Earth
		cout << miles.getValue() << " miles" << endl;
		Yards yards(miles);
		cout << yards.getValue() << " yd." << endl;
		Feet feet(miles);
		cout << feet.getValue() << " ft." << endl;
		Inches inches(miles);
		cout << inches.getValue() << " in." << endl;
	}

	cout << "---------------------\n";

	{
		Minutes minutes(1000000ULL);
		Yards yards(minutes);
		cout << "There are " << (yards.getValue()/1000000.0) << " yards in a minute!\n";
	}

	cout << "---------------------\n";

	{
		Inches inches;
		Feet feet;
		Yards yards;

		string s = "12 12 12";

		std::istringstream iss(s);

		iss >> inches >> feet >> yards;

		cout << inches << ' ' << feet << ' ' << yards << endl;

		cout << inches << ' ' << ((Inches)feet) << ' ' << ((Inches)yards) << endl;
	}

	cout << "---------------------\n";

	{
		Radians r;

		r = 3.14159;

		cout << r << " radians is " << (Degrees)r << " degrees\n";

		Degrees d(r);

		Degrees d2;

		d2 = r;

		cout << (Degrees)r << " == " << d << " == " << d2 << endl;
	}

	system("PAUSE");

	return 0;
}
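The hand-written unit classes above convert through constructors and assignment. A minimal sketch of the same idea with std::chrono, where std::ratio encodes the conversion factors and exact conversions are implicit:
#include <chrono>
#include <iostream>
#include <ratio>

// Days is defined locally as a 24-hour duration; exact, non-truncating
// conversions (days -> hours -> minutes) then need no explicit cast.
using Days = std::chrono::duration<long long, std::ratio<86400>>;

int main() {
    Days d(3);
    std::chrono::hours h = d;        // 72 hrs.
    std::chrono::minutes m = h;      // 4320 min.
    std::cout << h.count() << " hrs., " << m.count() << " min." << std::endl;
    return 0;
}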
Example #18
void MessagingPort::setTimeout(Milliseconds millis) {
    double timeout = double(millis.count()) / 1000;
    _psock->setTimeout(timeout);
}
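The conversion used by setTimeout above (and by the setSoTimeout comments in the connection-pool examples), a millisecond count turned into fractional seconds for an API that takes a double, shown in isolation:
#include <chrono>

// A millisecond duration expressed as fractional seconds, e.g. 2500 ms -> 2.5.
double toFractionalSeconds(std::chrono::milliseconds ms) {
    return static_cast<double>(ms.count()) / 1000.0;
    // Equivalently: std::chrono::duration<double>(ms).count()
}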
Example #19
    void LegacyDistLockPinger::_distLockPingThread(ConnectionString addr,
                                                   const string& process,
                                                   Milliseconds sleepTime) {
        setThreadName("LockPinger");

        string pingId = pingThreadId(addr, process);

        LOG(0) << "creating distributed lock ping thread for " << addr
               << " and process " << process << " (sleeping for " << sleepTime.count() << "ms)";

        static int loops = 0;
        Date_t lastPingTime = jsTime();
        while (!shouldStopPinging(addr, process)) {

            LOG(3) << "distributed lock pinger '" << pingId << "' about to ping.";

            Date_t pingTime;

            try {
                ScopedDbConnection conn(addr.toString(), 30.0);

                pingTime = jsTime();
                const auto  elapsed = pingTime - lastPingTime;
                if (elapsed > 10 * sleepTime) {
                    warning() << "Lock pinger for addr: " << addr
                              << ", proc: " << process
                              << " was inactive for " << elapsed;
                }

                lastPingTime = pingTime;

                // Refresh the entry corresponding to this process in the lockpings collection.
                conn->update(LockpingsType::ConfigNS,
                             BSON(LockpingsType::process(process)),
                             BSON("$set" << BSON(LockpingsType::ping(pingTime))),
                             true);

                string err = conn->getLastError();
                if (!err.empty()) {
                    warning() << "pinging failed for distributed lock pinger '" << pingId << "'."
                              << causedBy(err);
                    conn.done();

                    if (!shouldStopPinging(addr, process)) {
                        waitTillNextPingTime(sleepTime);
                    }
                    continue;
                }

                // Remove really old entries from the lockpings collection if they're not
                // holding a lock. This may happen if an instance of a process was taken down
                // and no new instance came up to replace it for quite a while.
                // NOTE this is NOT the same as the standard take-over mechanism, which forces
                // the lock entry.
                BSONObj fieldsToReturn = BSON(LocksType::state() << 1
                                              << LocksType::process() << 1);
                auto activeLocks =
                        conn->query(LocksType::ConfigNS,
                                    BSON(LocksType::state() << NE << LocksType::UNLOCKED),
                                    0,
                                    0,
                                    &fieldsToReturn);

                uassert(16060,
                        str::stream() << "cannot query locks collection on config server "
                                      << conn.getHost(),
                        activeLocks.get());

                std::set<string> pids;
                while (activeLocks->more()) {
                    BSONObj lock = activeLocks->nextSafe();

                    if (!lock[LocksType::process()].eoo()) {
                        pids.insert(lock[LocksType::process()].str());
                    }
                    else {
                        warning() << "found incorrect lock document during lock ping cleanup: "
                                  << lock.toString();
                    }
                }

                // This can potentially delete ping entries that are actually active (if the clock
                // of another pinger is too skewed). This is still fine as the lock logic only
                // checks if there is a change in the ping document and the document going away
                // is a valid change.
                Date_t fourDays = pingTime - stdx::chrono::hours{4 * 24};
                conn->remove(LockpingsType::ConfigNS,
                             BSON(LockpingsType::process() << NIN << pids
                                  << LockpingsType::ping() << LT << fourDays));
                err = conn->getLastError();

                if (!err.empty()) {
                    warning() << "ping cleanup for distributed lock pinger '" << pingId
                              << "' failed." << causedBy(err);
                    conn.done();

                    if (!shouldStopPinging(addr, process)) {
                        waitTillNextPingTime(sleepTime);
                    }
                    continue;
                }

                LOG(1 - (loops % 10 == 0 ? 1 : 0)) << "cluster " << addr
                        << " pinged successfully at " << pingTime
                        << " by distributed lock pinger '" << pingId
                        << "', sleeping for " << sleepTime.count() << "ms";

                // Remove old locks, if possible
                // Make sure no one else is adding to this list at the same time
                boost::lock_guard<boost::mutex> lk(_mutex);

                int numOldLocks = _unlockList.size();
                if (numOldLocks > 0) {
                    LOG(0) << "trying to delete " << _unlockList.size()
                           << " old lock entries for process " << process;
                }

                bool removed = false;
                for (auto iter = _unlockList.begin(); iter != _unlockList.end();
                        iter = (removed ? _unlockList.erase(iter) : ++iter)) {
                    removed = false;
                    try {
                        // Got DistLockHandle from lock, so we don't need to specify _id again
                        conn->update(LocksType::ConfigNS,
                                     BSON(LocksType::lockID(*iter)),
                                     BSON("$set" << BSON( LocksType::state(LocksType::UNLOCKED))));

                        // Either the update went through or it didn't,
                        // either way we're done trying to unlock.
                        LOG(0) << "handled late remove of old distributed lock with ts " << *iter;
                        removed = true;
                    }
                    catch (UpdateNotTheSame&) {
                        LOG(0) << "partially removed old distributed lock with ts " << *iter;
                        removed = true;
                    }
                    catch (std::exception& e) {
                        warning() << "could not remove old distributed lock with ts " << *iter
                                  << causedBy(e);
                    }

                }

                if (numOldLocks > 0 && _unlockList.size() > 0) {
                    LOG(0) << "not all old lock entries could be removed for process " << process;
                }

                conn.done();

            }
            catch (std::exception& e) {
                warning() << "distributed lock pinger '" << pingId
                          << "' detected an exception while pinging." << causedBy(e);
            }

            if (!shouldStopPinging(addr, process)) {
                waitTillNextPingTime(sleepTime);
            }
        }

        warning() << "removing distributed lock ping thread '" << pingId << "'";

        if (shouldStopPinging(addr, process)) {
            acknowledgeStopPing(addr, process);
        }
    }
Example #20
    ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
                                                                const HostAndPort& target,
                                                                Date_t now,
                                                                Milliseconds timeout) {
        boost::unique_lock<boost::mutex> lk(_mutex);

        // Clean up connections on stale/unused hosts
        _cleanUpStaleHosts_inlock(now);

        for (HostConnectionMap::iterator hostConns;
             ((hostConns = _connections.find(target)) != _connections.end());) {

            // Clean up the requested host to remove stale/unused connections
            _cleanUpOlderThan_inlock(now, &hostConns->second);
            if (hostConns->second.empty()) {
                // prevent host from causing unnecessary cleanups
                _lastUsedHosts[hostConns->first] = kNeverTooStale;
                break;
            }

            _inUseConnections.splice(_inUseConnections.begin(),
                                     hostConns->second,
                                     hostConns->second.begin());

            const ConnectionList::iterator candidate = _inUseConnections.begin();
            lk.unlock();
            try {
                if (candidate->conn->isStillConnected()) {
                    // setSoTimeout takes a double representing the number of seconds for send and
                    // receive timeouts.  Thus, we must take count() and divide by
                    // 1000.0 to get the number of seconds with a fractional part.
                    candidate->conn->setSoTimeout(timeout.count() / 1000.0);
                    return candidate;
                }
            }
            catch (...) {
                lk.lock();
                _destroyConnection_inlock(&_inUseConnections, candidate);
                throw;
            }

            lk.lock();
            _destroyConnection_inlock(&_inUseConnections, candidate);
        }

        // No idle connection in the pool; make a new one.
        lk.unlock();
        std::auto_ptr<DBClientConnection> conn(new DBClientConnection);

        // setSoTimeout takes a double representing the number of seconds for send and receive
        // timeouts.  Thus, we must take count() and divide by 1000.0 to get the number
        // of seconds with a fractional part.
        conn->setSoTimeout(timeout.count() / 1000.0);
        std::string errmsg;
        uassert(28640,
                str::stream() << "Failed attempt to connect to "
                              << target.toString() << "; " << errmsg,
                conn->connect(target, errmsg));

        conn->port().tag |= _messagingPortTags;

        if (getGlobalAuthorizationManager()->isAuthEnabled()) {
            uassert(ErrorCodes::AuthenticationFailed,
                    "Missing credentials for authenticating as internal user",
                    isInternalAuthSet());
            conn->auth(getInternalUserAuthParamsWithFallback());
        }

        lk.lock();
        return _inUseConnections.insert(_inUseConnections.begin(),
                                        ConnectionInfo(conn.release(), now));
    }
Example #21
bool operator>=(const Milliseconds& m, int v)
{
    return m.getTime() >= v;
}
Example #22
AsyncTimerASIO::AsyncTimerASIO(asio::io_service::strand* strand, Milliseconds expiration)
    : _strand(strand), _timer(_strand->get_io_service(), expiration.toSystemDuration()) {}
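A hedged, minimal illustration of the same shape in newer Boost.Asio (assuming Boost.Asio is available): arm a steady timer with a millisecond expiration and wait for it asynchronously. This is only the general pattern, not the original AsyncTimerASIO wrapper.
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

int main() {
    boost::asio::io_context io;
    boost::asio::steady_timer timer(io, std::chrono::milliseconds(250));
    timer.async_wait([](const boost::system::error_code& ec) {
        if (!ec)
            std::cout << "timer fired\n";
    });
    io.run();   // blocks until the timer's handler has run
    return 0;
}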
Example #23
bool operator<(const Milliseconds& m, int v)
{
    return m.getTime() < v;
}