// CREATORS StackTraceTestAllocator::StackTraceTestAllocator( bslma::Allocator *basicAllocator) : d_magic(k_STACK_TRACE_TEST_ALLOCATOR_MAGIC) , d_numBlocksInUse(0) , d_blocks(0) , d_mutex() , d_name("<unnamed>") , d_failureHandler(bsl::allocator_arg_t(), bsl::allocator<FailureHandler>(basicAllocator ? basicAllocator : &bslma::MallocFreeAllocator::singleton())) , d_maxRecordedFrames(k_DEFAULT_NUM_RECORDED_FRAMES + k_IGNORE_FRAMES) , d_traceBufferLength(getTraceBufferLength(k_DEFAULT_NUM_RECORDED_FRAMES)) , d_ostream(&bsl::cerr) , d_demangleFlag(true) , d_allocator_p(basicAllocator ? basicAllocator : &bslma::MallocFreeAllocator::singleton()) { BSLS_ASSERT_SAFE(d_maxRecordedFrames >= k_DEFAULT_NUM_RECORDED_FRAMES); BSLS_ASSERT_SAFE(d_traceBufferLength >= d_maxRecordedFrames); // This must be assigned in a statement in the body of the c'tor rather // than in the initializer list to work around a microsoft bug with // function pointers. d_failureHandler = &failAbort; }
// CREATORS DayOfWeekSet_Iter::DayOfWeekSet_Iter(int data, int index) : d_data( static_cast<unsigned char>(data)) , d_index(static_cast<signed char>(index)) { BSLS_ASSERT_SAFE(0 == (data & 1)); BSLS_ASSERT_SAFE(index >= 0 && index <= 8); while (d_index < 8 && !((1 << d_index) & d_data)) { ++d_index; } }
Decimal64 DecimalUtil::multiplyByPowerOf10(Decimal64 value, Decimal64 exponent)
    // Return the result of scaling the specified 'value' by ten raised to
    // the specified 'exponent', delegating to the decNumber 'decDoubleScaleB'
    // operation.  The behavior is undefined unless 'exponent' is within the
    // asserted range below.
{
    BSLS_ASSERT_SAFE(makeDecimal64(-1999999997, 0) <= exponent);
    BSLS_ASSERT_SAFE(exponent <= makeDecimal64(99999999, 0));

    Decimal64 result = value;
    // 'decDoubleScaleB' computes 'value * 10^exponent' in place into
    // 'result', using the shared decNumber context for rounding/status.
    decDoubleScaleB(result.data(), value.data(), exponent.data(),
                    getContext());
    return result;
}
void BidirectionalLinkListUtil::spliceListBeforeTarget
                                                  (BidirectionalLink *first,
                                                   BidirectionalLink *last,
                                                   BidirectionalLink *target)
    // Unlink the segment of nodes '[first .. last]' from the list that
    // currently contains it, and re-link that segment immediately before the
    // specified 'target' node; if 'target' is 0 the segment becomes a
    // stand-alone list.  The behavior is undefined unless 'first' and 'last'
    // delimit a well-formed segment that does not contain 'target'.
{
    BSLS_ASSERT_SAFE(first);
    BSLS_ASSERT_SAFE(last);

#ifdef BDE_BUILD_TARGET_SAFE_2
    // Test to avoid creating loops is O(N) expensive, so check only in SAFE_2

    BidirectionalLink *cursor = first;
    while (cursor != last->nextLink()) {
        BSLS_ASSERT_SAFE(cursor != target);
        cursor = cursor->nextLink();
    }
    BSLS_ASSERT_SAFE(isWellFormed(first, last));
#endif

    // unlink from existing list: the segment's old neighbors are joined to
    // each other (either may be null at a list boundary)

    if (BidirectionalLink* prev = first->previousLink()) {
        prev->setNextLink(last->nextLink());
    }
    if (BidirectionalLink* next = last->nextLink()) {
        next->setPreviousLink(first->previousLink());
    }

    // update into spliced location:

    if (!target) {
        // Prepending target an empty list is *explicitly* *allowed*
        // The "spliced" segment is still extracted from the original list

        first->setPreviousLink(0);  // redundant with pre-condition
        last->setNextLink(0);
    }
    else {
        // Wire the segment in between 'target' and its (possibly absent)
        // predecessor.

        if (BidirectionalLink *prev = target->previousLink()) {
            first->setPreviousLink(prev);
            prev->setNextLink(first);
        }
        else {
            first->setPreviousLink(0);
        }
        last->setNextLink(target);
        target->setPreviousLink(last);
    }
}
void Blob::appendDataBuffer(const BlobBuffer& buffer)
    // Append the specified 'buffer' to this blob and extend the data length
    // so that the entire capacity of 'buffer' is data, trimming any unused
    // capacity of the previously-last data buffer.
{
    const int bufferSize    = buffer.size();
    const int oldDataLength = d_dataLength;

    d_totalSize  += bufferSize;
    d_dataLength += bufferSize;

    if (d_totalSize == d_dataLength) {
        // Fast path.  At the start, we had 0 or more buffers in the blob and
        // they were all full.

        BSLS_ASSERT_SAFE(d_dataIndex == (int) d_buffers.size() - 1 ||
                                  (0 == d_dataIndex && 0 == d_buffers.size()));

        d_buffers.push_back(buffer);
        d_preDataIndexLength = oldDataLength;
        d_dataIndex = static_cast<int>(d_buffers.size()) - 1;
    }
    else if (bufferSize == d_dataLength) {
        // Another fast path.  At the start, there was no data, but empty
        // buffers were present.  Put the new buffer at the front.

        BSLS_ASSERT_SAFE(d_totalSize > d_dataLength);
        BSLS_ASSERT_SAFE(0 == d_dataIndex);
        BSLS_ASSERT_SAFE(0 == d_preDataIndexLength);

        d_buffers.insert(d_buffers.begin(), buffer);
    }
    else {
        // Complicated case -- at the start, buffer(s) with data were present,
        // trimming 'prevBuf' might or might not be necessary, empty space was
        // present on the end, whole empty buffer(s) might or might not have
        // been present on the end.

        BSLS_ASSERT_SAFE(d_dataLength > bufferSize);
        BSLS_ASSERT_SAFE(d_dataLength < d_totalSize);
        BSLS_ASSERT_SAFE((unsigned) d_dataIndex < d_buffers.size());
        BSLS_ASSERT_SAFE(oldDataLength >= d_preDataIndexLength);

        BlobBuffer& prevBuf = d_buffers[d_dataIndex];

        // Shrink the last data buffer to exactly the data it holds; the
        // trimmed capacity no longer counts toward 'd_totalSize'.

        const unsigned newPrevBufSize = oldDataLength - d_preDataIndexLength;
        const unsigned trim           = prevBuf.size() - newPrevBufSize;
        BSLS_ASSERT_SAFE(trim <= (unsigned) prevBuf.size());

        prevBuf.setSize(newPrevBufSize);

        // Insert the new buffer immediately after the (now trimmed) previous
        // data buffer, ahead of any whole empty buffers.

        ++d_dataIndex;
        d_buffers.insert(d_buffers.begin() + d_dataIndex, buffer);
        d_preDataIndexLength = oldDataLength;
        d_totalSize -= trim;
    }
}
bslalg::HashTableBucket *HashTable_ImpDetails::defaultBucketAddress()
    // Return the address of the shared,永remains-empty default bucket.
{
    // Aggregate initialization of a POD should be thread-safe static
    // initialization.

    static bslalg::HashTableBucket s_bucket = {0, 0};

    // These two tests should not be necessary, but will catch corruption in
    // components that try to write to the shared bucket.

    BSLS_ASSERT_SAFE(!s_bucket.first());
    BSLS_ASSERT_SAFE(!s_bucket.last());

    bslalg::HashTableBucket *bucketAddress = &s_bucket;
    return bucketAddress;
}
double PeriodDayCountUtil::yearsDiff(
                                const bdlt::Date&              beginDate,
                                const bdlt::Date&              endDate,
                                const bsl::vector<bdlt::Date>& periodDate,
                                double                         periodYearDiff,
                                DayCountConvention::Enum       convention)
    // Return the (signed) number of years between the specified 'beginDate'
    // and 'endDate' according to the specified 'convention', using the
    // specified sorted 'periodDate' schedule and the specified
    // 'periodYearDiff' year fraction per period.  The behavior is undefined
    // unless both dates lie within the schedule's range.
{
    BSLS_ASSERT(periodDate.size() >= 2);
    BSLS_ASSERT(periodDate.front() <= beginDate);
    BSLS_ASSERT(beginDate          <= periodDate.back());
    BSLS_ASSERT(periodDate.front() <= endDate);
    BSLS_ASSERT(endDate            <= periodDate.back());

    BSLS_ASSERT_SAFE(isSortedAndUnique(periodDate.begin(), periodDate.end()));

    // Only one convention is currently supported; any other enumerator is a
    // contract violation.

    double numYears = 0.0;

    if (DayCountConvention::e_PERIOD_ICMA_ACTUAL_ACTUAL == convention) {
        numYears = bbldc::PeriodIcmaActualActual::yearsDiff(beginDate,
                                                            endDate,
                                                            periodDate,
                                                            periodYearDiff);
    }
    else {
        BSLS_ASSERT_OPT(0 && "Unrecognized convention");
    }

    return numYears;
}
void BidirectionalLinkListUtil::insertLinkBeforeTarget(
                                                    BidirectionalLink *newNode,
                                                    BidirectionalLink *target)
    // Insert the specified 'newNode' immediately before the specified
    // 'target' node; if 'target' is 0, 'newNode' becomes a one-element list.
{
    BSLS_ASSERT(newNode);
#ifdef BDE_BUILD_TARGET_SAFE_2
    BSLS_ASSERT_SAFE(isWellFormed(target, target));
#endif

    // Prepending before an empty list is *explicitly* *allowed*

    if (!target) {
        newNode->reset();
        return;                                                       // RETURN
    }

    BidirectionalLink *predecessor = target->previousLink();
    if (predecessor) {
        // Splice between 'predecessor' and 'target'.
        newNode->setPreviousLink(predecessor);
        predecessor->setNextLink(newNode);
    }
    else {
        newNode->setPreviousLink(0);  // asserted precondition
    }
    newNode->setNextLink(target);
    target->setPreviousLink(newNode);
}
void baltzo::TimeZoneUtilImp::createLocalTimePeriod(
                          LocalTimePeriod                          *result,
                          const Zoneinfo::TransitionConstIterator&  transition,
                          const Zoneinfo&                           timeZone)
    // Load into the specified 'result' the local-time period described by the
    // specified 'transition' of the specified 'timeZone': its descriptor, its
    // UTC start time, and its UTC end time (the start of the following
    // transition, or the maximum representable 'bdlt::Datetime' if
    // 'transition' is the last one).  The behavior is undefined unless
    // 'transition' is a valid, non-end iterator of 'timeZone'.
{
    BSLS_ASSERT(result);
    BSLS_ASSERT(transition != timeZone.endTransitions());
    BSLS_ASSERT_SAFE(ZoneinfoUtil::isWellFormed(timeZone));

    // The transition times in 'timeZone' are guaranteed to be in the range
    // representable by 'bdlt::Datetime'.

    bdlt::Datetime utcStartTime(1, 1, 1, 0, 0, 0);
    Zoneinfo::convertFromTimeT64(&utcStartTime, transition->utcTime());

    Zoneinfo::TransitionConstIterator next = transition;
    ++next;

    // 'utcEndTime' must account for the special case that the time falls
    // after the last transition.  Note that 'next' must not be dereferenced
    // when it is the end iterator (the original code converted
    // 'next->utcTime()' unconditionally, dereferencing the end iterator for
    // the last transition).

    bdlt::Datetime utcEndTime;
    if (next == timeZone.endTransitions()) {
        utcEndTime = bdlt::Datetime(9999, 12, 31, 23, 59, 59, 999);
    }
    else {
        Zoneinfo::convertFromTimeT64(&utcEndTime, next->utcTime());
    }

    // Note that the descriptor is set exactly once (it was redundantly set a
    // second time in the original code).

    result->setDescriptor(transition->descriptor());
    result->setUtcStartAndEndTime(utcStartTime, utcEndTime);
}
const char *String::strstrCaseless(const char *string, int stringLen, const char *subString, int subStringLen) { BSLS_ASSERT(string || 0 == stringLen); BSLS_ASSERT( 0 <= stringLen); BSLS_ASSERT(subString || 0 == subStringLen); BSLS_ASSERT( 0 <= subStringLen); if (0 == subStringLen) { return string; // RETURN } if (stringLen < subStringLen) { return 0; // RETURN } BSLS_ASSERT_SAFE(string); // impossible to fail const char *end = string + stringLen - subStringLen; for (const char *p = string; p <= end; ++p) { if (areEqualCaseless(p, subStringLen, subString, subStringLen)) { return p; // RETURN } } return 0; }
const char *String::strrstrCaseless(const char *string, int stringLen, const char *subString, int subStringLen) { BSLS_ASSERT(string || 0 == stringLen); BSLS_ASSERT( 0 <= stringLen); BSLS_ASSERT(subString || 0 == subStringLen); BSLS_ASSERT( 0 <= subStringLen); if (0 == subStringLen) { return string + stringLen; // RETURN } if (stringLen < subStringLen) { return 0; // RETURN } BSLS_ASSERT_SAFE(string); // impossible to fail for (int i = stringLen - subStringLen; i >= 0; --i) { const char *p = string + i; if (areEqualCaseless(p, subStringLen, subString, subStringLen)) { return p; // RETURN } } return 0; }
// MANIPULATORS
void *StackTraceTestAllocator::allocate(size_type size)
    // Return a newly allocated block of at least the specified 'size' bytes,
    // preceded by a hidden header recording the current stack trace and a
    // link into this allocator's list of outstanding blocks; return 0 if
    // 'size' is 0.
{
    if (0 == size) {
        return 0;                                                     // RETURN
    }

    bslmt::LockGuard<bslmt::Mutex> guard(&d_mutex);

    // The underlying allocator might align the block differently depending on
    // the size passed.  The alignment must be large enough to accommodate the
    // stack addresses (type 'void *') in the buffer, it must be large enough
    // to accommodate the 'BlockHeader's alignment requirements, and it must
    // be large enough to accommodate whatever the alignment requirements of
    // whatever the client intends to store in their portion of the block.  We
    // can infer the requirements of our pointers and block header at compile
    // time in 'k_FIXED_ALIGN', then we infer the alignment requirement of the
    // client's section from the size passed, and take the maximum of the two
    // to get the alignment required.  We then round the size we will pass to
    // the underlying allocator up to a multiple of 'align', so that the
    // underlying allocator cannot infer a lower value of the alignment
    // requirement.

    enum { k_FIXED_ALIGN =
                     Max<bsls::AlignmentFromType<void *>::VALUE,
                         bsls::AlignmentFromType<BlockHeader>::VALUE>::VALUE };
    const int align = bsl::max<int>(
                        k_FIXED_ALIGN,
                        bsls::AlignmentUtil::calculateAlignmentFromSize(size));
    const int lowBits = align - 1;
    BSLS_ASSERT_SAFE(0 == (align & lowBits)); // verify 'align' is power of 2
    size = (size + lowBits) & ~lowBits;       // round 'size' up to multiple of
                                              // 'align'

    // Single allocation holds: frame buffer, then header, then client bytes.

    void **framesBegin = (void **) d_allocator_p->allocate(
            d_traceBufferLength * sizeof(void *) + sizeof(BlockHeader) + size);

    BlockHeader *blockHdr = reinterpret_cast<BlockHeader*>(
                                            framesBegin + d_traceBufferLength);

    // Link the new block at the head of the doubly-linked block list and
    // stamp it with the allocated-block magic number.

    new (blockHdr) BlockHeader(d_blocks,
                               &d_blocks,
                               this,
                               k_ALLOCATED_BLOCK_MAGIC);
    if (d_blocks) {
        d_blocks->d_prevNext_p = &blockHdr->d_next_p;
    }
    d_blocks = blockHdr;

    // Zero-fill the frame buffer first so unused slots read as null, then
    // capture the current call stack.

    bsl::fill(framesBegin, framesBegin + d_maxRecordedFrames, (void *) 0);
    AddressUtil::getStackAddresses(framesBegin, d_maxRecordedFrames);

    // The client's region starts immediately after the header.

    void *ret = blockHdr + 1;

    BSLS_ASSERT(0 == ((UintPtr) ret & ((sizeof(void *) - 1) | lowBits)));

    ++d_numBlocksInUse;

    return ret;
}
size_t HashTable_ImpDetails::growBucketsForLoadFactor(size_t *capacity,
                                                      size_t  minElements,
                                                      size_t  requestedBuckets,
                                                      double  maxLoadFactor)
    // Return a prime number of buckets, at least the specified
    // 'requestedBuckets', sufficient to hold the specified 'minElements'
    // without exceeding the specified 'maxLoadFactor', and load into the
    // specified 'capacity' the number of elements that many buckets can hold
    // at that load factor.  Throw 'std::length_error' if no such bucket count
    // is representable.  The behavior is undefined unless '0 != capacity',
    // '0 < minElements', '0 < requestedBuckets', and '0.0 < maxLoadFactor'.
{
    BSLS_ASSERT_SAFE(  0 != capacity);
    BSLS_ASSERT_SAFE(  0 <  minElements);
    BSLS_ASSERT_SAFE(  0 <  requestedBuckets);
    BSLS_ASSERT_SAFE(0.0 <  maxLoadFactor);

    const size_t MAX_SIZE_T = native_std::numeric_limits<size_t>::max();
    const double MAX_AS_DBL = static_cast<double>(MAX_SIZE_T);

    // This check is why 'minElements' must be at least one - so that we do
    // not allocate a number of buckets that cannot hold at least one element,
    // and then throw the unexpected 'logic_error' on the first 'insert'.  We
    // make it a pre-condition of the function, as some callers have
    // contextual knowledge that the argument must be non-zero, and so avoid a
    // redundant 'min' call.  The 'static_cast<double>' addresses warnings on
    // 64-bit systems.  The truncation that occurs in such cases does not
    // impact the final result, so we do not need any deeper analysis in such
    // cases.

    double d = static_cast<double>(minElements) / maxLoadFactor;
    if (d > MAX_AS_DBL) {
        // Throw a 'std::length_error' exception if 'd' is larger than the
        // highest unsigned value representable by 'size_t'.

        StdExceptUtil::throwLengthError("The number of buckets overflows.");
    }

    // Double the candidate bucket count until the next prime at or above it
    // yields enough capacity for 'minElements'.

    for (size_t result = native_std::max(requestedBuckets,
                                         static_cast<size_t>(d));
         ;
         result *= 2) {
        result = nextPrime(result);  // throws if too large

        double newCapacity = static_cast<double>(result) * maxLoadFactor;
        if (static_cast<double>(minElements) <= newCapacity) {
            // Set '*capacity' to the integer value corresponding to
            // 'newCapacity', or the highest unsigned value representable by
            // 'size_t' if 'newCapacity' is larger.

            *capacity = newCapacity < MAX_AS_DBL
                      ? static_cast<size_t>(newCapacity)
                      : MAX_SIZE_T;
            return result;                                            // RETURN
        }
    }
}
void SharedPtrRep::resetCountsRaw(int numSharedReferences, int numWeakReferences) { BSLS_ASSERT_SAFE(0 <= numSharedReferences); BSLS_ASSERT_SAFE(0 <= numWeakReferences); // These reference counts can be relaxed because access to this // 'SharedPtrRep' must be serialized when calling this function (as // specified in the function-level doc). d_adjustedSharedCount.storeRelaxed(2 * numSharedReferences + (numWeakReferences ? 1 : 0)); // minimum consistency: relaxed d_adjustedWeakCount.storeRelaxed(2 * numWeakReferences + (numSharedReferences ? 1 : 0)); // minimum consistency: relaxed }
// CREATORS Blob::Blob(bslma::Allocator *basicAllocator) : d_buffers(basicAllocator) , d_totalSize(0) , d_dataLength(0) , d_dataIndex(0) , d_preDataIndexLength(0) , d_bufferFactory_p(InvalidBlobBufferFactory::factory(0)) { BSLS_ASSERT_SAFE(0 == assertInvariants()); }
// Create a copy of the specified 'original' blob, using the optionally
// specified 'basicAllocator' to supply memory.  Note that the buffer factory
// is deliberately NOT copied; the copy gets an invalid (null-wrapped)
// factory.
Blob::Blob(const Blob&       original,
           bslma::Allocator *basicAllocator)
: d_buffers(original.d_buffers, basicAllocator)
, d_totalSize(original.d_totalSize)
, d_dataLength(original.d_dataLength)
, d_dataIndex(original.d_dataIndex)
, d_preDataIndexLength(original.d_preDataIndexLength)
, d_bufferFactory_p(InvalidBlobBufferFactory::factory(0))
{
    BSLS_ASSERT_SAFE(0 == assertInvariants());
}
static inline int decode32(const char *address) // Read the 32-bit big-endian integer in the array of bytes located at the // specified 'address' and return that value. The behavior is undefined // unless 'address' points to an accessible memory location. { BSLS_ASSERT_SAFE(address); int temp; bsl::memcpy(&temp, address, sizeof(temp)); return BSLS_BYTEORDER_BE_U32_TO_HOST(temp); }
void BidirectionalLinkListUtil::insertLinkAfterTarget(
                                                    BidirectionalLink *newNode,
                                                    BidirectionalLink *target)
    // Insert the specified 'newNode' immediately after the specified
    // 'target' node.  The behavior is undefined unless both are non-null.
{
    BSLS_ASSERT_SAFE(newNode);
    BSLS_ASSERT_SAFE(target);

    BidirectionalLink *successor = target->nextLink();

#ifdef BDE_BUILD_TARGET_SAFE_2
    BSLS_ASSERT_SAFE(!successor || isWellFormed(target, successor));
#endif

    // Splice 'newNode' in between 'target' and its (possibly null)
    // successor.

    target->setNextLink(newNode);
    newNode->setPreviousLink(target);
    newNode->setNextLink(successor);
    if (successor) {
        successor->setPreviousLink(newNode);
    }
}
void BidirectionalLinkListUtil::unlink(BidirectionalLink *node)
    // Remove the specified 'node' from the list containing it, joining its
    // neighbors (if any) to each other.  Note that 'node's own links are
    // left unchanged.
{
    BSLS_ASSERT_SAFE(node);

    BidirectionalLink *predecessor = node->previousLink();
    BidirectionalLink *successor   = node->nextLink();

    if (predecessor && successor) {
        // Interior node: join the two neighbors.
        BSLS_ASSERT_SAFE(isWellFormed(predecessor, successor));

        successor->setPreviousLink(predecessor);
        predecessor->setNextLink(successor);
    }
    else if (predecessor) {
        // 'node' was the tail.
        predecessor->setNextLink(0);
    }
    else if (successor) {
        // 'node' was the head.
        successor->setPreviousLink(0);
    }
}
// STATIC METHODS inline static bool isLastDayOfFebruary(int year, int month, int day) // Return 'true' if the specified 'day' of the specified 'month' in the // specified 'year' is the last day of February for that 'year', and // 'false' otherwise. The behavior is undefined unless 'year', 'month', // and 'day' represent a valid 'bdlt::Date' value. { BSLS_ASSERT_SAFE(bdlt::SerialDateImpUtil:: isValidYearMonthDay(year, month, day)); return 2 == month && ( 29 == day || (28 == day && !bdlt::SerialDateImpUtil::isLeapYear(year))); }
void ManagedPtr_Members::set(void *object, void *factory, DeleterFunc deleter) { // Note that 'factory' may be null if 'deleter' supports it, so cannot be // asserted here. BSLS_ASSERT_SAFE(0 != deleter || 0 == object); d_obj_p = object; if (object) { d_deleter.set(object, factory, deleter); } }
// MANIPULATORS TimeInterval& TimeInterval::addSeconds(bsls::Types::Int64 seconds) { BSLS_ASSERT_SAFE(isSumValidInt64(seconds, d_seconds)); d_seconds += seconds; if (d_seconds > 0 && d_nanoseconds < 0) { --d_seconds; d_nanoseconds += k_NANOSECS_PER_SEC; } else if (d_seconds < 0 && d_nanoseconds > 0) { ++d_seconds; d_nanoseconds -= k_NANOSECS_PER_SEC; } return *this; }
Datum DatumMapBuilder::commit()
    // Adopt the map under construction into a 'Datum' and return it, leaving
    // this builder empty.  In safe builds, verify that a map claiming to be
    // sorted has its entries in non-descending key order.
{
    // Make sure the map is sorted.  The check is skipped for an empty
    // (zero-capacity) map, and only applies when the 'sorted' flag is set;
    // 'adjacent_find' with 'compareGreater' finds any out-of-order pair.

    BSLS_ASSERT_SAFE(0 == d_capacity ||
                     false == *d_mapping.sorted() ||
                     bsl::adjacent_find(d_mapping.data(),
                                        d_mapping.data() + *d_mapping.size(),
                                        compareGreater)
                         == d_mapping.data() + *d_mapping.size());

    Datum result = Datum::adoptMap(d_mapping);

    // Ownership has transferred to 'result'; reset to the empty state.

    d_mapping  = DatumMutableMapRef();
    d_capacity = 0;
    return result;
}
Blob::Blob(const BlobBuffer  *buffers,
           int                numBuffers,
           BlobBufferFactory *factory,
           bslma::Allocator  *basicAllocator)
    // Create a blob holding copies of the specified 'numBuffers' 'buffers',
    // using the specified 'factory' (wrapped so a null factory is detected on
    // use) and the optionally specified 'basicAllocator'.  The blob starts
    // with zero data length; its total size is the sum of the buffer sizes.
: d_buffers(buffers, buffers + numBuffers, basicAllocator)
, d_totalSize(0)
, d_dataLength(0)
, d_dataIndex(0)
, d_preDataIndexLength(0)
, d_bufferFactory_p(InvalidBlobBufferFactory::factory(factory))
{
    // Accumulate the total capacity of the supplied buffers, verifying that
    // each buffer reports a non-negative size.

    const BlobBufferConstIterator endIter = d_buffers.end();
    for (BlobBufferConstIterator cur = d_buffers.begin();
         cur != endIter;
         ++cur) {
        BSLS_ASSERT(0 <= cur->size());
        d_totalSize += cur->size();
    }

    BSLS_ASSERT_SAFE(0 == assertInvariants());
}
void SharedPtrRep::releaseRef()
    // Release one shared reference to this representation, destroying the
    // managed object when the last shared reference is released, and
    // destroying this representation when no shared or weak references
    // remain.
{
    BSLS_ASSERT_SAFE(0 < numReferences());

    // 'd_adjustedSharedCount' stores twice the shared count, plus 1 if any
    // weak references exist (see 'resetCountsRaw'), so subtracting 2 releases
    // exactly one shared reference.

    const int sharedCount = d_adjustedSharedCount.add(-2);
                                       // release consistency: acquire/release
    if (0 == sharedCount) {
        // Last shared reference and no weak references: destroy both the
        // managed object and this representation.

        disposeObject();
        disposeRep();
    }
    else if (1 == sharedCount) {
        // Last shared reference but weak references remain: destroy the
        // managed object now, and clear the "shared references outstanding"
        // bit in the weak count.

        disposeObject();

        const int weakCount = d_adjustedWeakCount.add(-1);
                                       // release consistency: acquire/release
        if (0 == weakCount) {
            // The weak count reached zero concurrently; this caller disposes
            // of the representation.

            disposeRep();
        }
    }
}
RateLimiter::~RateLimiter()
    // Destroy this object.  In safe builds, verify the class invariants:
    // both rate limits and rate windows are positive, and (unless the limit
    // is 1) each window is no longer than the drain time of the maximum
    // possible number of units at that limit, so internal computations cannot
    // overflow.
{
    BSLS_ASSERT_SAFE(sustainedRateLimit() > 0);
    BSLS_ASSERT_SAFE(peakRateLimit() > 0);
    BSLS_ASSERT_SAFE(sustainedRateWindow() > bsls::TimeInterval(0));
    BSLS_ASSERT_SAFE(peakRateWindow() > bsls::TimeInterval(0));
    BSLS_ASSERT_SAFE(peakRateLimit() == 1 ||
                     peakRateWindow() <=
                         LeakyBucket::calculateDrainTime(ULLONG_MAX,
                                                         peakRateLimit(),
                                                         true));
    BSLS_ASSERT_SAFE(sustainedRateLimit() == 1 ||
                     sustainedRateWindow() <=
                         LeakyBucket::calculateDrainTime(ULLONG_MAX,
                                                         sustainedRateLimit(),
                                                         true));
}
void RbTreeUtil::remove(RbTreeAnchor *tree, RbTreeNode *node)
    // Remove the specified 'node' from the specified 'tree', restoring the
    // red-black invariants (rebalancing when a black node was physically
    // removed).  The behavior is undefined unless 'node' is a member of the
    // non-empty 'tree'.
{
    BSLS_ASSERT(0 != node);
    BSLS_ASSERT(0 != tree);
    BSLS_ASSERT(0 != tree->rootNode());

    RbTreeNode *x, *y;          // 'y' is the node physically unlinked; 'x' is
                                // the node (possibly null) that replaces it
    RbTreeNode *parentOfX;      // parent of 'x' after unlinking, needed for
                                // recoloring when 'x' is null
    bool        yIsBlackFlag;

    // Implementation Note:  This implementation has been adjusted from the
    // one described in "Introduction to Algorithms" [Cormen, Leiserson,
    // Rivest] (i.e., CLR) to avoid swapping node values (swapping nodes is
    // potentially inefficient and inappropriate for an STL map).
    // Specifically, int case where 'node' has two (non-null) children, CLR
    // (confusingly) swaps the value of the node with its replacement; instead
    // we move node's successor to the position of node, and then recolor its
    // value with the same result).

    // Case 1: If either child of the node being removed is 0, then 'node'
    // can be replaced by its non-null child (or by a null child if 'node'
    // has no children).

    if (0 == node->leftChild()) {
        y = node;
        x = node->rightChild();
    }
    else if (0 == node->rightChild()) {
        y = node;
        x = node->leftChild();
    }
    else {
        // Case 2: Otherwise the 'node' will be replaced by its successor in
        // the tree.

        y = leftmost(node->rightChild());
        x = y->rightChild();
    }
    yIsBlackFlag = y->isBlack();

    if (y == node) {
        // We should be in case 1, where 'node' has (at least 1) null child,
        // and will simply be replaced by one of its children.  In this
        // context, 'x' refers to the node that will replace 'node'.  Simply
        // point the parent of 'node' to its new child, 'x'.  Note that in
        // this context, we may have to set the first and last node of the
        // tree.

        BSLS_ASSERT_SAFE(0 == node->leftChild() || 0 == node->rightChild());

        if (isLeftChild(node)) {
            // If the node being removed is to the left of its parent, it may
            // be the first node of the tree.

            if (node == tree->firstNode()) {
                tree->setFirstNode(next(node));
            }
            node->parent()->setLeftChild(x);
        }
        else {
            node->parent()->setRightChild(x);
        }

        parentOfX = node->parent();
        if (x) {
            x->setParent(node->parent());
        }
    }
    else {
        // We should be in case 2, where 'node' has two non-null children.
        // In this context 'y' is the successor to 'node' which will be used
        // to replace 'node'.  Note that in this context, we never need to
        // set the first or last node of the tree (as the node being removed
        // has two children).

        BSLS_ASSERT_SAFE(0 != node->leftChild() && 0 != node->rightChild());
        BSLS_ASSERT_SAFE(0 == y->leftChild());
        BSLS_ASSERT_SAFE(x == y->rightChild());

        if (isLeftChild(node)) {
            node->parent()->setLeftChild(y);
        }
        else {
            node->parent()->setRightChild(y);
        }
        y->setLeftChild(node->leftChild());
        y->leftChild()->setParent(y);

        if (y->parent() != node) {
            // The following logic only applies if the replacement node 'y'
            // is not a direct descendent of the 'node' being replaced,
            // otherwise it is a degenerate case.

            BSLS_ASSERT_SAFE(y->parent()->leftChild() == y);

            parentOfX = y->parent();
            y->parent()->setLeftChild(x);  // 'x' is y->rightChild()
            if (x) {
                x->setParent(y->parent());
            }

            y->setRightChild(node->rightChild());
            y->rightChild()->setParent(y);
        }
        else {
            parentOfX = y;
        }
        y->setParent(node->parent());
        y->setColor(node->color());
    }

    // Removing a black node may have violated the equal-black-height
    // invariant; rebalance if so.

    if (yIsBlackFlag) {
        recolorTreeAfterRemoval(tree, x, parentOfX);
    }
    BSLS_ASSERT(!tree->rootNode() ||
                tree->sentinel() == tree->rootNode()->parent());
    tree->decrementNumNodes();
}
Blob::~Blob()
    // Destroy this blob, verifying the class invariants in safe builds.
{
    BSLS_ASSERT_SAFE(0 == assertInvariants());
}
void baltzo::TimeZoneUtilImp::resolveLocalTime(
                           bdlt::DatetimeTz                  *result,
                           LocalTimeValidity::Enum           *resultValidity,
                           Zoneinfo::TransitionConstIterator *transitionIter,
                           const bdlt::Datetime&              localTime,
                           DstPolicy::Enum                    dstPolicy,
                           const Zoneinfo&                    timeZone)
    // Load into the specified 'result' the local time in the specified
    // 'timeZone' corresponding to the specified (possibly ambiguous or
    // invalid) 'localTime', resolved according to the specified 'dstPolicy';
    // load into the specified 'resultValidity' whether 'localTime' was
    // unique, ambiguous, or invalid, and into the specified 'transitionIter'
    // the transition describing local time at the resolved UTC instant.
{
    BSLS_ASSERT(result);
    BSLS_ASSERT(resultValidity);
    BSLS_ASSERT(transitionIter);
    BSLS_ASSERT_SAFE(ZoneinfoUtil::isWellFormed(timeZone));

    BALL_LOG_SET_CATEGORY(LOG_CATEGORY);

    typedef LocalTimeValidity Validity;

    // First, determine the transitions that could conceivably apply to
    // 'localTime', and determine whether 'localTime' is valid and unique,
    // valid but ambiguous, or invalid.

    Zoneinfo::TransitionConstIterator iter1, iter2;
    ZoneinfoUtil::loadRelevantTransitions(&iter1,
                                          &iter2,
                                          resultValidity,
                                          localTime,
                                          timeZone);

    const int utcOffset1 = iter1->descriptor().utcOffsetInSeconds();
    const int utcOffset2 = iter2->descriptor().utcOffsetInSeconds();

    // Next resolve the UTC offset for 'localTime' based on 'dstPolicy' and
    // the relevant transitions.

    int utcOffsetInSeconds;
    if (dstPolicy != DstPolicy::e_UNSPECIFIED) {
        // If 'dstPolicy' is DST or STANDARD, select the UTC offset from a
        // local time descriptor with a matching daylight-saving time
        // property.

        const bool isDstOff = dstPolicy == DstPolicy::e_DST;
        selectUtcOffset(&utcOffsetInSeconds, iter1, iter2, timeZone,
                        isDstOff);
    }
    else {
        // 'dstPolicy' is UNSPECIFIED.  Select the UTC offset from the later
        // relevant transition if the local time is ambiguous or the earlier
        // if invalid.  Note that for valid and unique local times, the
        // returned iterators are equal so this choice is irrelevant.

        BSLS_ASSERT(*resultValidity != Validity::e_VALID_UNIQUE ||
                    utcOffset1 == utcOffset2);

        utcOffsetInSeconds = *resultValidity == Validity::e_INVALID
                           ? utcOffset1
                           : utcOffset2;
    }

    // Use the resolved UTC offset to create the resolved UTC value for
    // 'localTime'

    const int utcOffsetInMinutes = utcOffsetInSeconds / 60;
    bdlt::Datetime resolvedUtcTime = localTime;
    resolvedUtcTime.addMinutes(-utcOffsetInMinutes);

    // Assign 'transitionIter' to the transition, from the two relevant
    // transitions determined earlier, describing the properties of local
    // time at 'resolvedUtcTime'.

    const bdlt::EpochUtil::TimeT64 resolvedUtcTimeT =
                                   Zoneinfo::convertToTimeT64(resolvedUtcTime);

    *transitionIter = resolvedUtcTimeT < iter2->utcTime() ? iter1 : iter2;

    // Finally, set 'result' to the local time in the indicated time zone
    // corresponding to 'resolvedUtcTime'.  Note that the corresponding local
    // time value for 'resolvedUtcTime' may be different than 'localTime'.

    const int resultOffsetInMinutes =
                     (*transitionIter)->descriptor().utcOffsetInSeconds() / 60;

    BALL_LOG_TRACE << "[ Input = " << localTime
                   << " Validity = " << *resultValidity
                   << " DstPolicy = " << dstPolicy
                   << " AppliedOffset = " << resultOffsetInMinutes << " ]"
                   << BALL_LOG_END;

    bdlt::Datetime resultTime = resolvedUtcTime;
    resultTime.addMinutes(resultOffsetInMinutes);

    result->setDatetimeTz(resultTime, resultOffsetInMinutes);
}
// ACCESSORS bsl::ostream& Date::print(bsl::ostream& stream, int level, int spacesPerLevel) const { if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(stream.bad())) { BSLS_PERFORMANCEHINT_UNLIKELY_HINT; return stream; // RETURN } // Typical space usage (10 bytes): ddMMMyyyy nil. Reserve 128 bytes for // possible BAD DATE result, which is sufficient space for the bad date // verbose message even on a 64-bit system. char buffer[128]; #if defined(BSLS_ASSERT_OPT_IS_ACTIVE) if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY( !Date::isValidSerial(d_serialDate))) { BSLS_PERFORMANCEHINT_UNLIKELY_HINT; #if defined(BSLS_PLATFORM_CMP_MSVC) #define snprintf _snprintf #endif snprintf(buffer, sizeof buffer, "*BAD*DATE:%p->d_serialDate=%d", this, d_serialDate); #if defined(BSLS_ASSERT_SAFE_IS_ACTIVE) BSLS_LOG("'bdlt::Date' precondition violated: %s.", buffer); #endif BSLS_ASSERT_SAFE( !"'bdlt::Date::print' attempted on date with invalid state."); } else { #endif // defined(BSLS_ASSERT_OPT_IS_ACTIVE) int y, m, d; getYearMonthDay(&y, &m, &d); const char *const month = months[m]; buffer[0] = static_cast<char>(d / 10 + '0'); buffer[1] = static_cast<char>(d % 10 + '0'); buffer[2] = month[0]; buffer[3] = month[1]; buffer[4] = month[2]; buffer[5] = static_cast<char>( y / 1000 + '0'); buffer[6] = static_cast<char>(((y % 1000) / 100) + '0'); buffer[7] = static_cast<char>(((y % 100) / 10) + '0'); buffer[8] = static_cast<char>( y % 10 + '0'); buffer[9] = 0; #if defined(BSLS_ASSERT_OPT_IS_ACTIVE) } #endif // defined(BSLS_ASSERT_OPT_IS_ACTIVE) bslim::Printer printer(&stream, level, spacesPerLevel); printer.start(true); // 'true' -> suppress '[' stream << buffer; printer.end(true); // 'true' -> suppress ']' return stream; }