bool Throttle::requestPermission(int                       numActions,
                                 const bsls::TimeInterval& now)
{
#if defined(BSLS_ASSERT_IS_ACTIVE)
    BSLS_ASSERT(0 < numActions);
    BSLS_ASSERT(now.seconds() <= k_MAX_SECONDS);
    BSLS_ASSERT(now.seconds() >= k_MIN_SECONDS);

    const Int64 sns = k_BILLION * now.seconds();    // Seconds in NanoSeconds
    if (0 <= sns) {
        BSLS_ASSERT(LLONG_MAX - sns > now.nanoseconds());
    }
    else {
        BSLS_ASSERT(LLONG_MIN - sns < now.nanoseconds());
    }
#endif

    if (d_maxSimultaneousActions < numActions) {
        BSLS_ASSERT(0 == d_maxSimultaneousActions);

        // It is best to deal with the 'allow none' case here.  We have to do
        // the above two conditions just for asserts on our way into the
        // method, and if we don't handle the
        // 'd_maxSimultaneousActions < numActions' case here we risk undefined
        // behavior due to signed arithmetic overflow later on.

        return false;                                                 // RETURN
    }

    // Testing for 'k_ALLOW_ALL' here prevents undefined behavior later in the
    // function due to overflowing signed arithmetic.

    if (k_ALLOW_ALL == d_nanosecondsPerAction) {
        return true;                                                  // RETURN
    }

    const Int64 currentTime  = now.totalNanoseconds();
    const Int64 requiredTime = numActions * d_nanosecondsPerAction;
    const Int64 lagTime      = d_nanosecondsPerTotalReset - requiredTime;

    Int64 prevLeakTime = AtomicOps::getInt64Acquire(&d_prevLeakTime);
    while (true) {
        const Int64 timeDiff = currentTime - prevLeakTime;
        if (timeDiff < requiredTime) {
            return false;                                             // RETURN
        }

        const Int64 nextLeakTime = d_nanosecondsPerTotalReset <= timeDiff
                                 ? currentTime - lagTime
                                 : prevLeakTime + requiredTime;

        const Int64 swappedLeakTime = AtomicOps::testAndSwapInt64AcqRel(
                                                              &d_prevLeakTime,
                                                              prevLeakTime,
                                                              nextLeakTime);
        if (swappedLeakTime == prevLeakTime) {
            return true;                                              // RETURN
        }

        prevLeakTime = swappedLeakTime;
    }
}
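// Usage sketch (illustrative only, not part of the component): a free helper
// that asks a 'bdlmt::Throttle' for permission using the monotonic clock,
// which is the clock type a throttle is configured with by default.  The
// helper name 'permitBurst' is an assumption made for this example.

#include <bdlmt_throttle.h>
#include <bsls_systemtime.h>
#include <bsls_timeinterval.h>

static bool permitBurst(bdlmt::Throttle *throttle, int numActions)
    // Return 'true' if the specified 'throttle' grants permission for the
    // specified 'numActions' as of the current monotonic time, and 'false'
    // otherwise.
{
    const bsls::TimeInterval now = bsls::SystemTime::nowMonotonicClock();

    return throttle->requestPermission(numActions, now);
}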
// PRIVATE MANIPULATORS
int ChannelPoolChannel::addReadQueueEntry(
                                       int                          numBytes,
                                       const BlobBasedReadCallback& callback,
                                       const bsls::TimeInterval&    timeOut)
{
    if (d_closed) {
        return -6;                                                    // RETURN
    }

    BSLS_ASSERT(0 < numBytes);

    d_readQueue.push_back(ReadQueueEntry());
    ReadQueueEntry& entry = d_readQueue.back();

    entry.d_numBytesNeeded = numBytes;
    entry.d_timeOut        = timeOut;
    entry.d_timeOutTimerId = 0;
    entry.d_progress       = AsyncChannel::e_SUCCESS;
    entry.d_readCallback   = callback;

    if (0 != timeOut.totalMicroseconds()) {
        registerTimeoutAndUpdateClockId(timeOut);
        entry.d_timeOutTimerId = d_nextClockId;
    }

    if (1 == d_readQueue.size()) {
        d_channelPool_p->enableRead(d_channelId);
    }

    return 0;
}
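// Hypothetical caller sketch (for illustration; not taken from the real
// class): a public 'read' manipulator would typically serialize access to
// the read queue and then delegate to 'addReadQueueEntry'.  The 'd_mutex'
// member below is an assumed name introduced for this sketch.
//..
//  int ChannelPoolChannel::read(int                          numBytes,
//                               const BlobBasedReadCallback& readCallback,
//                               const bsls::TimeInterval&    timeOut)
//  {
//      bslmt::LockGuard<bslmt::Mutex> guard(&d_mutex);  // assumed member
//      return addReadQueueEntry(numBytes, readCallback, timeOut);
//  }
//..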
bool Throttle::requestPermission(const bsls::TimeInterval& now)
{
#if defined(BSLS_ASSERT_IS_ACTIVE)
    BSLS_ASSERT(now.seconds() <= k_MAX_SECONDS);
    BSLS_ASSERT(now.seconds() >= k_MIN_SECONDS);

    const Int64 sns = k_BILLION * now.seconds();    // Seconds in NanoSeconds
    if (0 <= sns) {
        BSLS_ASSERT(LLONG_MAX - sns > now.nanoseconds());
    }
    else {
        BSLS_ASSERT(LLONG_MIN - sns < now.nanoseconds());
    }
#endif

    // Special casing 'allow all' here prevents undefined behavior later in
    // the function due to overflowing signed arithmetic.

    if (k_ALLOW_ALL == d_nanosecondsPerAction) {
        return true;                                                  // RETURN
    }

    const Int64 currentTime = now.totalNanoseconds();

    Int64 prevLeakTime = AtomicOps::getInt64Acquire(&d_prevLeakTime);
    while (true) {
        const Int64 timeDiff = currentTime - prevLeakTime;
        if (timeDiff < d_nanosecondsPerAction) {
            return false;                                             // RETURN
        }

        const Int64 nextLeakTime =
                       d_nanosecondsPerTotalReset <= timeDiff
                       ? currentTime - d_nanosecondsPerTotalReset +
                                                         d_nanosecondsPerAction
                       : prevLeakTime + d_nanosecondsPerAction;

        const Int64 swappedLeakTime = AtomicOps::testAndSwapInt64AcqRel(
                                                              &d_prevLeakTime,
                                                              prevLeakTime,
                                                              nextLeakTime);
        if (swappedLeakTime == prevLeakTime) {
            return true;                                              // RETURN
        }

        prevLeakTime = swappedLeakTime;
    }
}
int Throttle::requestPermissionIfValid(bool                      *result,
                                       int                        numActions,
                                       const bsls::TimeInterval&  now)
{
    if (numActions <= 0 || (d_maxSimultaneousActions < numActions &&
                                            0 != d_maxSimultaneousActions)) {
        return -1;                                                    // RETURN
    }
    if (k_MAX_SECONDS < now.seconds() || now.seconds() < k_MIN_SECONDS) {
        return -1;                                                    // RETURN
    }

    const Int64 sns = k_BILLION * now.seconds();    // Seconds in NanoSeconds
    if (0 <= sns ? LLONG_MAX - sns < now.nanoseconds()
                 : LLONG_MIN - sns > now.nanoseconds()) {
        return -1;                                                    // RETURN
    }

    *result = this->requestPermission(numActions, now);
    return 0;
}
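// Usage sketch (illustrative only): unlike 'requestPermission', which
// asserts on out-of-contract arguments, 'requestPermissionIfValid' reports
// invalid arguments through a non-zero return status and leaves the result
// flag untouched.  The helper name 'permitBurstChecked' is an assumption
// made for this example.

static bool permitBurstChecked(bdlmt::Throttle *throttle, int numActions)
    // Return 'true' if the specified 'throttle' grants permission for the
    // specified 'numActions' now, and 'false' if permission is denied or
    // the arguments are out of range.
{
    bool permitted = false;

    const int status = throttle->requestPermissionIfValid(
                                     &permitted,
                                     numActions,
                                     bsls::SystemTime::nowMonotonicClock());

    return 0 == status && permitted;
}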
CalendarCache::CalendarCache(CalendarLoader            *loader,
                             const bsls::TimeInterval&  timeout,
                             bslma::Allocator          *basicAllocator)

// We have to supply 'bsl::less<key>()' because 'bsl::map' does not have a
// constructor that takes only an allocator.

: d_cache(bsl::less<bsl::string>(), basicAllocator)
, d_loader_p(loader)
, d_timeOut(static_cast<bsl::time_t>(timeout.seconds()))
, d_hasTimeOutFlag(true)
, d_lock()
, d_allocator_p(bslma::Default::allocator(basicAllocator))
{
    BSLS_ASSERT(loader);
    BSLS_ASSERT(bsls::TimeInterval(0) <= timeout);
    BSLS_ASSERT(timeout <= bsls::TimeInterval(INT_MAX));
}
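// Usage sketch (illustrative only): a calendar cache with a 24-hour timeout.
// The caller supplies a concrete 'bdlt::CalendarLoader' implementation; the
// function name and the "US" calendar name are assumptions made for this
// example.

#include <bdlt_calendar.h>
#include <bdlt_calendarcache.h>
#include <bsl_memory.h>

void useCalendarCache(bdlt::CalendarLoader *loader)
    // Load the "US" calendar through a cache that refreshes entries older
    // than one day, using the specified 'loader' on cache misses.
{
    bdlt::CalendarCache cache(loader, bsls::TimeInterval(86400, 0));

    // 'getCalendar' invokes the loader on a cache miss; later calls within
    // the timeout return the cached copy.

    bsl::shared_ptr<const bdlt::Calendar> calendar = cache.getCalendar("US");

    (void)calendar;
}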
void debugprint(const bsls::TimeInterval& timeInterval)
    // Print the specified 'timeInterval' to the console.  Note that this
    // free function overload works in coordination with 'bsls_bsltestutil'.
{
    debugprint(timeInterval.totalSecondsAsDouble());
}