// ACCESSORS
bsl::size_t FixedQueueIndexManager::length() const
{
    // Note that 'FixedQueue::pushBack' and 'FixedQueue::popFront' rely on the
    // fact that the following atomic loads are sequentially consistent.  If
    // this were to change, 'FixedQueue::tryPushBack' and
    // 'FixedQueue::tryPopFront' would need to be modified.

    unsigned int combinedPushIndex = discardDisabledFlag(d_pushIndex);
    unsigned int combinedPopIndex  = d_popIndex;

    // Note that the following is logically equivalent to:
    //..
    // int difference = circularDifference(combinedPushIndex,
    //                                     combinedPopIndex,
    //                                     d_maxCombinedIndex + 1);
    //
    // if      (difference <  0)          { return 0; }
    // else if (difference >= d_capacity) { return d_capacity; }
    // return difference;
    //..
    // However, we can perform a minor optimization knowing that
    // 'combinedPushIndex' was loaded (with sequential consistency) *before*
    // 'combinedPopIndex', so it is not possible for 'difference' to be
    // greater than 'd_capacity' unless 'combinedPopIndex' has wrapped around
    // 'd_maxCombinedIndex' and the length is 0.

    int difference = combinedPushIndex - combinedPopIndex;
    if (difference >= 0) {
        if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(
                difference > static_cast<int>(d_capacity))) {
            // Because the pop index is acquired after the push index, it is
            // possible for the push index to be immediately before
            // 'd_maxCombinedIndex' and for 'combinedPopIndex' to be acquired
            // after the pop index wraps around to 0, resulting in a very
            // large positive 'difference'.

            BSLS_ASSERT(0 > circularDifference(combinedPushIndex,
                                               combinedPopIndex,
                                               d_maxCombinedIndex + 1));

            return 0;                                                 // RETURN
        }
        return static_cast<bsl::size_t>(difference);                  // RETURN
    }

    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(
            difference < -static_cast<int>(d_maxCombinedIndex / 2))) {
        BSLS_ASSERT(0 < circularDifference(combinedPushIndex,
                                           combinedPopIndex,
                                           d_maxCombinedIndex + 1));

        difference += d_maxCombinedIndex + 1;
        return bsl::min(static_cast<bsl::size_t>(difference), d_capacity);
                                                                      // RETURN
    }
    return 0;
}
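For reference, a minimal sketch of the unoptimized computation described in the comment above, assuming a 'circularDifference' helper with the semantics used elsewhere in this component (an illustration, not the component's code):

static bsl::size_t referenceLength(unsigned int combinedPushIndex,
                                   unsigned int combinedPopIndex,
                                   unsigned int maxCombinedIndex,
                                   bsl::size_t  capacity)
{
    // Compute the circular distance from the pop index to the push index,
    // then clamp it to the range '[0, capacity]'.

    int difference = circularDifference(combinedPushIndex,
                                        combinedPopIndex,
                                        maxCombinedIndex + 1);

    if (difference < 0) {
        return 0;                                                     // RETURN
    }
    if (static_cast<bsl::size_t>(difference) >= capacity) {
        return capacity;                                              // RETURN
    }
    return static_cast<bsl::size_t>(difference);
}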
void GuardingAllocator::deallocate(void *address)
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == address)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return;                                                       // RETURN
    }

    const int pageSize = getSystemPageSize();

    void *firstPage;  // address of the first page of the allocation
    void *guardPage;  // address of the guard page

    if (e_BEFORE_USER_BLOCK == d_guardPageLocation) {
        // The memory page before the block returned to the user is protected.

        firstPage = static_cast<char *>(address) - pageSize;
        guardPage = firstPage;
    }
    else {
        // The memory page after the block returned to the user is protected.

        firstPage = *(void **)(static_cast<char *>(address) - OFFSET);
        guardPage = *(void **)(static_cast<char *>(address) - OFFSET * 2);
    }

    // Unprotect the guard page and free the memory.

    const int rc = systemUnprotect(guardPage, pageSize);
    (void)rc;

    BSLS_ASSERT_OPT(0 == rc);

    systemFree(firstPage);
}
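A sketch of the 'e_AFTER_USER_BLOCK' layout that the 'else' branch above decodes; the placement of the two stashed pointers ('OFFSET' and 'OFFSET * 2' bytes below the user address) is taken from the code, while the picture itself is only illustrative:

// Illustrative layout only (not part of the component):
//
//   firstPage                        userAddress (value passed in)
//   |                                |
//   v                                v
//   +-----...-----+-----------+-----------+----...----+-------------+
//   |   leading   | guardPage | firstPage |   user    | guard page  |
//   |    pages    |  pointer  |  pointer  |   block   | (protected) |
//   +-----...-----+-----------+-----------+----...----+-------------+
//                 ^           ^
//                 |           'userAddress - OFFSET'
//                 'userAddress - OFFSET * 2'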
void MultipoolAllocator::reserveCapacity(size_type size, size_type numObjects)
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == size)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return;                                                       // RETURN
    }

    d_multipool.reserveCapacity(size, numObjects);
}
// MANIPULATORS
void *MultipoolAllocator::allocate(size_type size)
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == size)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return 0;                                                     // RETURN
    }

    return d_multipool.allocate(size);
}
// MANIPULATORS
void *BufferedSequentialAllocator::allocate(size_type size)
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == size)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return 0;                                                     // RETURN
    }

    return d_pool.allocate(size);
}
void SequentialAllocator::reserveCapacity(int numBytes)
{
    BSLS_ASSERT(0 <= numBytes);

    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == numBytes)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return;                                                       // RETURN
    }

    d_sequentialPool.reserveCapacity(numBytes);
}
void *SequentialAllocator::allocateAndExpand(size_type *size)
{
    BSLS_ASSERT(size);

    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == *size)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return 0;                                                     // RETURN
    }

    return d_sequentialPool.allocateAndExpand(size);
}
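The five forwarding functions above all share one shape; a minimal sketch of the pattern, where 'MyAllocator' and 'd_impl' are hypothetical stand-ins and not part of any component above:

void *MyAllocator::allocate(size_type size)
{
    // A zero-size request is valid but assumed rare, so it is marked as the
    // unpredicted branch and short-circuited before reaching the underlying
    // implementation.

    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == size)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return 0;                                                     // RETURN
    }

    return d_impl.allocate(size);
}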
Example #8
void CountingAllocator::deallocate(void *address)
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == address)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return;                                                       // RETURN
    }

    address = static_cast<char *>(address) - OFFSET;

    const size_type recordedSize = *static_cast<size_type *>(address);

    d_numBytesInUse.addRelaxed(-static_cast<bsls::Types::Int64>(recordedSize));

    d_allocator_p->deallocate(address);
}
Example #9
// MANIPULATORS
void *CountingAllocator::allocate(bsls::Types::size_type size)
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == size)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return 0;                                                     // RETURN
    }

    // Round up 'size' for maximal alignment and add sufficient space to record
    // 'size' in the allocated block.

    const bsls::Types::size_type totalSize =
                 bsls::AlignmentUtil::roundUpToMaximalAlignment(size) + OFFSET;

    void *address = d_allocator_p->allocate(totalSize);

    d_numBytesInUse.addRelaxed(static_cast<bsls::Types::Int64>(size));
    d_numBytesTotal.addRelaxed(static_cast<bsls::Types::Int64>(size));

    *static_cast<bsls::Types::size_type *>(address) = size;

    return static_cast<char *>(address) + OFFSET;
}
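A sketch of the block layout implied by 'allocate' and 'deallocate' above; 'OFFSET' is assumed to be the maximally aligned size of the 'size' header, so the address returned to the caller stays maximally aligned (an assumption, not the component's actual definition):

// Illustrative layout only (not part of the component):
//
//   address from 'd_allocator_p'     address returned to the caller
//   |                                |
//   v                                v
//   +--------------------------------+-------------------------------+
//   | recorded 'size' (+ padding)    | caller's block                |
//   +--------------------------------+-------------------------------+
//   <----------- OFFSET ------------><-- rounded-up 'size' bytes --->
//
// A plausible definition of 'OFFSET' consistent with the arithmetic above:
//
//   const bsls::Types::size_type OFFSET =
//                                   bsls::AlignmentUtil::BSLS_MAX_ALIGNMENT;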
Example #10
// PRIVATE MANIPULATORS
void FixedThreadPool::processJobs()
{
    while (BSLS_PERFORMANCEHINT_PREDICT_LIKELY(
                                        e_RUN == d_control.loadRelaxed())) {
        Job functor;

        if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(
                                              d_queue.tryPopFront(&functor))) {
            BSLS_PERFORMANCEHINT_UNLIKELY_HINT;

            ++d_numThreadsWaiting;

            if (e_RUN == d_control && d_queue.isEmpty()) {
                d_queueSemaphore.wait();
            }

            d_numThreadsWaiting.addRelaxed(-1);
        }
        else {
            functor();
        }
    }
}
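The worker loop above blocks on 'd_queueSemaphore' only after advertising itself in 'd_numThreadsWaiting'; a minimal sketch of the producer side this pairs with, using hypothetical names (not the component's actual code):

void postJob(const Job& functor)
{
    d_queue.pushBack(functor);

    // Post the semaphore only when some worker has advertised that it may
    // block, keeping the common enqueue path free of semaphore traffic.

    if (0 < d_numThreadsWaiting.loadRelaxed()) {
        d_queueSemaphore.post();
    }
}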
Example #11
void testUsageExample1(int argc, bool assert)
{
    int verbose = argc > 2;
    int veryVerbose = argc > 3;
    int veryVeryVerbose = argc > 4;

    double tolerance = 0.05;

    (void) assert;
    (void) verbose;
    (void) veryVerbose;
    (void) veryVeryVerbose;
    (void) tolerance;

    Stopwatch timer;

    timer.reset();

    if (veryVerbose) {
        printf("BSLS_PERFORMANCEHINT_PREDICT_LIKELY\n");
    }

    timer.start();

    for (int x = 0; x < TESTSIZE; ++x) {
        int y = rand() % 100;

        // Incorrect usage of 'BSLS_PERFORMANCEHINT_PREDICT_LIKELY' since there
        // is only a one in 100 chance that this branch is taken.

        if (BSLS_PERFORMANCEHINT_PREDICT_LIKELY(y == 8)) {
            foo();
        }
        else {
            BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
            bar();
        }
    }

    timer.stop();
    double likelyTime = timer.elapsedTime();

    if (veryVerbose) {
        P(likelyTime);
    }


    if (veryVerbose) {
        printf("BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY\n");
    }

    timer.reset();
    timer.start();

    for (int x = 0; x < TESTSIZE; ++x) {
        int y = rand() % 100;

        // Correct usage of 'BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY' since there
        // is only a one in 100 chance that this branch is taken.

        if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(y == 8)) {
            BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
            foo();
        }
        else {
            bar();
        }
    }
    timer.stop();
    double unlikelyTime = timer.elapsedTime();

    if (veryVerbose) {
        P(unlikelyTime);
    }

#if defined(BDE_BUILD_TARGET_OPT)
    // Only check under optimized build.

#if defined(BSLS_PLATFORM_CMP_CLANG)                                          \
 || defined(BSLS_PLATFORM_CMP_GNU)                                            \
 || defined(BSLS_PLATFORM_CMP_SUN)                                            \
 || (defined(BSLS_PLATFORM_CMP_IBM) && BSLS_PLATFORM_CMP_VERSION >= 0x0900)
    // Only check when 'BSLS_PERFORMANCEHINT_PREDICT_LIKELY' and
    // 'BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY' expand into something
    // meaningful.

    if (assert) {
        LOOP2_ASSERT(likelyTime, unlikelyTime,
                     likelyTime + tolerance > unlikelyTime);
    }

#endif

#endif

}
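The compiler guard above reflects that the prediction macros only matter where the compiler supports them; on gcc and clang they are typically defined in terms of '__builtin_expect'.  A sketch of that expansion (an assumption about the usual definition, not a quote of 'bsls_performancehint.h'):

// Hypothetical expansion on gcc/clang builds:

#define BSLS_PERFORMANCEHINT_PREDICT_LIKELY(expr)                             \
                                                __builtin_expect(!!(expr), 1)
#define BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(expr)                           \
                                                __builtin_expect(!!(expr), 0)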
Example #12
// MANIPULATORS
void *GuardingAllocator::allocate(size_type size)
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(0 == size)) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return 0;                                                     // RETURN
    }

    const size_type paddedSize =
                          bsls::AlignmentUtil::roundUpToMaximalAlignment(size);

    // Adjust for additional memory needed to stash reference addresses when
    // 'e_AFTER_USER_BLOCK' is in use.

    const int adjustedSize = e_AFTER_USER_BLOCK == d_guardPageLocation
                             ? paddedSize + OFFSET * 2
                             : paddedSize;

    // Calculate the number of pages to allocate, *not* counting the guard
    // page.

    const int pageSize = getSystemPageSize();
    const int numPages = (adjustedSize + pageSize - 1) / pageSize;

    const size_type totalSize = (numPages + 1) * pageSize;  // add 1 for guard

    void *firstPage = systemAlloc(totalSize);

    if (!firstPage) {
#ifdef BDE_BUILD_TARGET_EXC
        BSLS_THROW(bsl::bad_alloc());
#else
        return 0;                                                     // RETURN
#endif
    }

    void *userAddress;  // address to return to the caller
    void *guardPage;    // address of the guard page for this allocation

    if (e_BEFORE_USER_BLOCK == d_guardPageLocation) {
        // Protect the memory page before the block returned to the user.

        guardPage   = firstPage;
        userAddress = static_cast<char *>(firstPage) + pageSize;
    }
    else {
        // Protect the memory page after the block returned to the user.

        guardPage   = static_cast<char *>(firstPage) + (numPages * pageSize);
        userAddress = static_cast<char *>(guardPage) - paddedSize;

        // Stash the reference addresses required by 'deallocate'.

        *(void **)(static_cast<char *>(userAddress) - OFFSET)     = firstPage;
        *(void **)(static_cast<char *>(userAddress) - OFFSET * 2) = guardPage;
    }

    // Protect the guard page from read/write access.

    if (0 != systemProtect(guardPage, pageSize)) {
        systemFree(firstPage);
#ifdef BDE_BUILD_TARGET_EXC
        BSLS_THROW(bsl::bad_alloc());
#else
        return 0;                                                     // RETURN
#endif
    }

    return userAddress;
}
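A sketch of the two page layouts produced by 'allocate' above, for 'numPages' usable pages plus one guard page (illustrative only, not part of the component):

//   e_BEFORE_USER_BLOCK:
//
//     firstPage (== guardPage)   userAddress
//     |                          |
//     v                          v
//     +--------------+----------------------------------+
//     |  guard page  |      numPages usable pages       |
//     +--------------+----------------------------------+
//
//   e_AFTER_USER_BLOCK:
//
//     firstPage                    userAddress           guardPage
//     |                            |<--- paddedSize --->|
//     v                            v                    v
//     +----------------------------+--------------------+------------+
//     | numPages usable pages (user block right-aligned)| guard page |
//     +-------------------------------------------------+------------+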
// ACCESSORS
bsl::ostream& Date::print(bsl::ostream& stream,
                          int           level,
                          int           spacesPerLevel) const
{
    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(stream.bad())) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        return stream;                                                // RETURN
    }

    // Typical space usage (10 bytes): 'ddMMMyyyy' plus a null terminator.
    // Reserve 128 bytes for a possible BAD DATE result, which is sufficient
    // space for the bad-date verbose message even on a 64-bit system.

    char buffer[128];

#if defined(BSLS_ASSERT_OPT_IS_ACTIVE)

    if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(
                !Date::isValidSerial(d_serialDate))) {
        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;

#if defined(BSLS_PLATFORM_CMP_MSVC)
#define snprintf _snprintf
#endif

        snprintf(buffer,
                 sizeof buffer,
                 "*BAD*DATE:%p->d_serialDate=%d",
                 this,
                 d_serialDate);

#if defined(BSLS_ASSERT_SAFE_IS_ACTIVE)
        BSLS_LOG("'bdlt::Date' precondition violated: %s.", buffer);
#endif
        BSLS_ASSERT_SAFE(
            !"'bdlt::Date::print' attempted on date with invalid state.");
    }
    else {
#endif  // defined(BSLS_ASSERT_OPT_IS_ACTIVE)

        int y, m, d;
        getYearMonthDay(&y, &m, &d);

        const char *const month = months[m];

        buffer[0] = static_cast<char>(d / 10 + '0');
        buffer[1] = static_cast<char>(d % 10 + '0');
        buffer[2] = month[0];
        buffer[3] = month[1];
        buffer[4] = month[2];
        buffer[5] = static_cast<char>(  y / 1000         + '0');
        buffer[6] = static_cast<char>(((y % 1000) / 100) + '0');
        buffer[7] = static_cast<char>(((y %  100) /  10) + '0');
        buffer[8] = static_cast<char>(  y %   10         + '0');
        buffer[9] = 0;

#if defined(BSLS_ASSERT_OPT_IS_ACTIVE)
    }
#endif  // defined(BSLS_ASSERT_OPT_IS_ACTIVE)

    bslim::Printer printer(&stream, level, spacesPerLevel);
    printer.start(true);  // 'true' -> suppress '['
    stream << buffer;
    printer.end(true);    // 'true' -> suppress ']'

    return stream;
}
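A minimal usage sketch of the rendering above; the output shown is inferred from the formatting code, not captured from a run:

// Hypothetical usage:

bdlt::Date date(2024, 3, 7);
date.print(bsl::cout, 0, -1);
    // Expected to write text of the form "07MAR2024" on a single line, with
    // no surrounding brackets since 'start' and 'end' are called with 'true'.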
Example #14
// MANIPULATORS
void *ConcurrentPool::allocate()
{
    Link *p;
    for (;;) {
        p = d_freeList.loadRelaxed();
        if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(!p)) {
            BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
            bslmt::LockGuard<bslmt::Mutex> guard(&d_mutex);
            p = d_freeList;
            if (!p) {
                replenish();
                continue;
            }
        }

        if (BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY
                  (2 != bsls::AtomicOperations::addIntNv(&p->d_refCount, 2))) {
            BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
            for (int i = 0; i < 3; ++i) {
                // To avoid unnecessary contention, assume that if we did not
                // get the first reference, then the other thread is about to
                // complete the pop.  Wait for a few cycles until it does.  If
                // it does not complete, go on and try to acquire the node
                // ourselves.

                if (d_freeList.loadRelaxed() != p) {
                    break;
                }
            }
        }

        // Force a dependent read of 'd_next_p' to make sure that we're not
        // racing against another thread that called 'deallocate' for 'p',
        // checked the refcount *before* we incremented it, and put 'p' back
        // in the free list with a potentially different 'd_next_p'.
        //
        // There are two possibilities in this particular case:
        //   - The following 'loadRelaxed()' returns the new free-list value
        //     (== p) and, because of the release barrier before the last CAS
        //     in 'deallocate', we observe the new 'd_next_p' value (this
        //     relies on dependent load ordering).
        //   - 'loadRelaxed()' returns the old value (!= p), so the CAS, and
        //     thus the condition below, will be false.
        //
        // Note that 'h' is made volatile so that the compiler does not
        // replace the 'h->d_next_p' load with 'p->d_next_p' (which would
        // remove the data dependency).  TBD: to be completely thorough,
        // 'h->d_next_p' needs a load-dependent barrier (a no-op on all
        // current architectures, though).

        const Link * volatile h = d_freeList.loadRelaxed();

        // gcc 4.3 and 4.4 seem to have trouble processing 'likely(a && b)';
        // using 'likely(a) && likely(b)' fixes the problem.  gcc 3.4.6 seems
        // to generate the proper code, though.

        if (BSLS_PERFORMANCEHINT_PREDICT_LIKELY(h == p)
         && BSLS_PERFORMANCEHINT_PREDICT_LIKELY(
                                d_freeList.testAndSwap(p, h->d_next_p) == p)) {
            break;
        }

        BSLS_PERFORMANCEHINT_UNLIKELY_HINT;
        for (;;) {
            int refCount = bsls::AtomicOperations::getInt(&p->d_refCount);

            if (refCount & 1) {
                if (refCount ==
                        bsls::AtomicOperations::testAndSwapInt(
                            &p->d_refCount,
                            refCount,
                            refCount^1)) {
                    // The node is now free but not on the free list.  Try to
                    // take it.

                    return static_cast<void *>(const_cast<Link **>(
                                                      &p->d_next_p)); // RETURN
                }
            }
            else if (refCount ==
                        bsls::AtomicOperations::testAndSwapInt(
                            &p->d_refCount,
                            refCount,
                            refCount - 2)) {
                break;
            }
        }
    }

    return static_cast<void *>(const_cast<Link **>(&p->d_next_p));
}
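An interpretation of the 'd_refCount' protocol used above, written out as comments (a reading of the code, not documentation quoted from the component):

// - References are counted in increments of 2, so bit 0 is free to act as a
//   flag: 'deallocate' presumably sets it when the node is logically free
//   but could not be pushed because transient references were still
//   outstanding.
//
// - 'addIntNv(&p->d_refCount, 2) == 2' therefore means "we took the first
//   and only reference"; any other value means another thread is also
//   inspecting the node.
//
// - In the recovery loop, 'refCount ^ 1' atomically claims a node whose
//   owner already released it (odd refcount), while 'refCount - 2' simply
//   drops our reference before retrying from the top.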
int FixedQueueIndexManager::reservePopIndex(unsigned int *generation,
                                            unsigned int *index)
{
    BSLS_ASSERT(0 != generation);
    BSLS_ASSERT(0 != index);

    enum Status { e_SUCCESS = 0, e_QUEUE_EMPTY = 1 };

    unsigned int loadedPopIndex = d_popIndex.load();
    unsigned int savedPopIndex  = -1;

    // We use 'savedPopIndex' to ensure we attempt to acquire an index at least
    // twice before returning 'e_QUEUE_EMPTY'.  This is purely a performance
    // adjustment.

    unsigned int currIndex, currGeneration;

    for (;;) {
        currGeneration = static_cast<unsigned int>(loadedPopIndex
                                                 / d_capacity);
        currIndex      = static_cast<unsigned int>(loadedPopIndex
                                                 % d_capacity);

        // Attempt to swap this cell's state from 'e_FULL' to 'e_READING'.

        const int compare = encodeElementState(currGeneration, e_FULL);
        const int swap    = encodeElementState(currGeneration, e_READING);
        const int was     = d_states[currIndex].testAndSwap(compare, swap);

        if (compare == was) {
            // We've successfully changed the state and thus acquired the
            // index.  Exit the loop.

            *generation = currGeneration;
            *index      = currIndex;

            break;
        }

        // We've failed to reserve the index.  We can use the result of the
        // 'testAndSwap' to determine whether the queue was empty at the point
        // of the 'testAndSwap'.  Either:
        //
        //: 1 The cell is from the previous generation, meaning that we are
        //:   waiting for poppers from the previous generation (queue is
        //:   empty)
        //:
        //: 2 The cell is from the current generation and empty (queue is
        //:   empty)
        //:
        //: 3 The cell is currently being written to (sleep)
        //:
        //: 4 The pop index has been incremented between the value being
        //:   loaded, and the attempt to test and swap.

        unsigned int elementGeneration = decodeGenerationFromElementState(was);
        ElementState state             = decodeStateFromElementState(was);

        int difference = currGeneration - elementGeneration;

        if (difference == 1 || BSLS_PERFORMANCEHINT_PREDICT_UNLIKELY(
            difference == -static_cast<int>(d_maxGeneration))) {
            // The cell is from the previous generation, meaning that we have
            // wrapped around and are attempting to reserve a cell that is
            // still being popped from the previous generation.

            BSLS_ASSERT(state == e_READING);
            BSLS_ASSERT(1 == circularDifference(currGeneration,
                                                elementGeneration,
                                                d_maxGeneration + 1));

            return e_QUEUE_EMPTY;                                     // RETURN
        }
        BSLS_ASSERT(0 >= circularDifference(currGeneration,
                                            elementGeneration,
                                            d_maxGeneration + 1));

        if (0 == difference && e_EMPTY == state) {
            // The cell is empty in the current generation, meaning the queue
            // is empty.

            if (savedPopIndex != loadedPopIndex) {
                // Make two attempts before reporting that the queue is empty.

                bslmt::ThreadUtil::yield();
                savedPopIndex = loadedPopIndex;
                loadedPopIndex = d_popIndex.loadRelaxed();
                continue;
            }
            return e_QUEUE_EMPTY;                                     // RETURN
        }

        if (0 != difference || e_WRITING == state) {
            // The cell is currently being written, or our pop index is very
            // out of date.  Delay and try to reserve it again.

            bslmt::ThreadUtil::yield();
            loadedPopIndex = d_popIndex.loadRelaxed();
            continue;
        }

        unsigned int next = nextCombinedIndex(loadedPopIndex);
        loadedPopIndex   = d_popIndex.testAndSwap(loadedPopIndex, next);
    }

    // Attempt to increment the pop index.

    d_popIndex.testAndSwap(loadedPopIndex,
                           nextCombinedIndex(loadedPopIndex));

    return 0;
}
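A minimal sketch of the element-state encoding assumed by the helpers used above ('encodeElementState' and the two decoders); the two-bit state layout here is a plausible choice, not necessarily the component's actual one:

enum { k_ELEMENT_STATE_BITS = 2, k_ELEMENT_STATE_MASK = 0x3 };

inline int encodeElementState(unsigned int generation, ElementState state)
    // Pack 'generation' above the low 'k_ELEMENT_STATE_BITS' bits that hold
    // 'state'.
{
    return static_cast<int>((generation << k_ELEMENT_STATE_BITS)
                           | static_cast<unsigned int>(state));
}

inline unsigned int decodeGenerationFromElementState(int encodedState)
{
    return static_cast<unsigned int>(encodedState) >> k_ELEMENT_STATE_BITS;
}

inline ElementState decodeStateFromElementState(int encodedState)
{
    return static_cast<ElementState>(encodedState & k_ELEMENT_STATE_MASK);
}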