Example #1
LargeMemoryBlock *ExtMemoryPool::mallocLargeObject(size_t allocationSize)
{
#if __TBB_MALLOC_LOCACHE_STAT
    AtomicIncrement(mallocCalls);
    AtomicAdd(memAllocKB, allocationSize/1024);
#endif
    LargeMemoryBlock* lmb = loc.get(allocationSize);
    if (!lmb) {
        BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true);
        if (backRefIdx.isInvalid())
            return NULL;

        // unalignedSize is set in getLargeBlock
        lmb = backend.getLargeBlock(allocationSize);
        if (!lmb) {
            removeBackRef(backRefIdx);
            loc.rollbackCacheState(allocationSize);
            return NULL;
        }
        lmb->backRefIdx = backRefIdx;
        STAT_increment(getThreadId(), ThreadCommonCounters, allocNewLargeObj);
    } else {
#if __TBB_MALLOC_LOCACHE_STAT
        AtomicIncrement(cacheHits);
        AtomicAdd(memHitKB, allocationSize/1024);
#endif
    }
    return lmb;
}
Example #2
void ExtMemoryPool::freeLargeObject(void *object)
{
    LargeObjectHdr *header = (LargeObjectHdr*)object - 1;

    // overwrite backRefIdx to simplify double free detection
    header->backRefIdx = BackRefIdx();
    if (!loc.put(this, header->memoryBlock)) {
        removeBackRef(header->memoryBlock->backRefIdx);
        backend.putLargeBlock(header->memoryBlock);
        STAT_increment(getThreadId(), ThreadCommonCounters, freeLargeObj);
    }
}
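freeLargeObject recovers the block descriptor by stepping one header back from the user pointer. A minimal sketch of that header-before-pointer layout, with a hypothetical Hdr type standing in for LargeObjectHdr and plain malloc/free standing in for the backend:

#include <cassert>
#include <cstdlib>

// Hypothetical stand-in for LargeObjectHdr: a descriptor stored right before the user area.
struct Hdr {
    void *memoryBlock;   // in tbbmalloc this points at the owning LargeMemoryBlock
};

void *allocWithHeader(size_t size, void *block) {
    // Reserve room for the header, then hand out the area just past it.
    Hdr *h = (Hdr*)malloc(sizeof(Hdr) + size);
    h->memoryBlock = block;
    return h + 1;                    // user pointer
}

void freeWithHeader(void *object) {
    Hdr *h = (Hdr*)object - 1;       // same trick as (LargeObjectHdr*)object - 1
    // ... here the real allocator would return h->memoryBlock to the cache or backend ...
    free(h);
}

int main() {
    int dummyBlock;
    void *p = allocWithHeader(100, &dummyBlock);
    assert(((Hdr*)p - 1)->memoryBlock == &dummyBlock);
    freeWithHeader(p);
}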
Example #3
bool LargeObjectCache::put(ExtMemoryPool *extMemPool, LargeMemoryBlock *largeBlock)
{
    size_t idx = sizeToIdx(largeBlock->unalignedSize);
    if (idx<numLargeBlockBins) {
        MALLOC_ITT_SYNC_RELEASING(bin+idx);
        if (bin[idx].put(extMemPool, largeBlock)) {
            STAT_increment(getThreadId(), ThreadCommonCounters, cacheLargeBlk);
            return true;
        } else
            return false;
    }
    return false;
}
Example #4
LargeMemoryBlock *LargeObjectCache::get(ExtMemoryPool *extMemPool, size_t size)
{
    MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT );
    LargeMemoryBlock *lmb = NULL;
    size_t idx = sizeToIdx(size);
    if (idx<numLargeBlockBins) {
        lmb = bin[idx].get(extMemPool, size);
        if (lmb) {
            MALLOC_ITT_SYNC_ACQUIRED(bin+idx);
            STAT_increment(getThreadId(), ThreadCommonCounters, allocCachedLargeBlk);
        }
    }
    return lmb;
}
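Both put and get map the (step-aligned) block size to a bin index with sizeToIdx; the actual mapping lives elsewhere in the allocator. A plausible sketch of such a step-based mapping, using illustrative minLargeObjectSize, largeBlockCacheStep, and numLargeBlockBins values rather than the real ones:

#include <cassert>
#include <cstddef>

// Illustrative constants, not the real tbbmalloc values.
const size_t minLargeObjectSize  = 8 * 1024;
const size_t largeBlockCacheStep = 8 * 1024;
const size_t numLargeBlockBins   = 1024;

// One bin per cache step: sizes minLargeObjectSize, +step, +2*step, ... map to 0, 1, 2, ...
size_t sizeToIdx(size_t size) {
    assert(size % largeBlockCacheStep == 0);       // mirrors the MALLOC_ASSERT in get()
    return (size - minLargeObjectSize) / largeBlockCacheStep;
}

int main() {
    assert(sizeToIdx(minLargeObjectSize) == 0);
    assert(sizeToIdx(minLargeObjectSize + 3*largeBlockCacheStep) == 3);
    // Oversized requests fall outside the cache, exactly like the idx<numLargeBlockBins guard.
    assert(sizeToIdx(minLargeObjectSize + 2000*largeBlockCacheStep) >= numLargeBlockBins);
}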
Example #5
template<typename Props>
LargeMemoryBlock *LargeObjectCacheImpl<Props>::get(uintptr_t currTime, size_t size)
{
    MALLOC_ASSERT( size%Props::CacheStep==0, ASSERT_TEXT );
    int idx = sizeToIdx(size);
    bool setNonEmpty = false;

    LargeMemoryBlock *lmb = bin[idx].get(size, currTime, &setNonEmpty);
    // Setting the bit to true may happen outside the lock. Since the bitmask is used
    // only for cleanup, this lack of consistency does not violate correctness here.
    if (setNonEmpty)
        bitMask.set(idx, true);
    if (lmb) {
        MALLOC_ITT_SYNC_ACQUIRED(bin+idx);
        STAT_increment(getThreadId(), ThreadCommonCounters, allocCachedLargeObj);
    }
    return lmb;
}
Example #6
void *ExtMemoryPool::mallocLargeObject(size_t size, size_t alignment)
{
    size_t headersSize = sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr);
    // TODO: take into account that they are already largeObjectAlignment-aligned
    size_t allocationSize = alignUp(size+headersSize+alignment, largeBlockCacheStep);

    if (allocationSize < size) // allocationSize wrapped around (overflowed) in alignUp
        return NULL;

    LargeMemoryBlock* lmb = loc.get(this, allocationSize);
    if (!lmb) {
        BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true);
        if (backRefIdx.isInvalid())
            return NULL;

        // unalignedSize is set in getLargeBlock
        lmb = backend.getLargeBlock(allocationSize);
        if (!lmb) {
            removeBackRef(backRefIdx);
            return NULL;
        }
        lmb->backRefIdx = backRefIdx;
        STAT_increment(getThreadId(), ThreadCommonCounters, allocNewLargeObj);
    }

    void *alignedArea = (void*)alignUp((uintptr_t)lmb+headersSize, alignment);
    LargeObjectHdr *header = (LargeObjectHdr*)alignedArea-1;
    header->memoryBlock = lmb;
    header->backRefIdx = lmb->backRefIdx;
    setBackRef(header->backRefIdx, header);

    lmb->objectSize = size;

    MALLOC_ASSERT( isLargeObject(alignedArea), ASSERT_TEXT );
    return alignedArea;
}
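The overflow check and header placement above are plain pointer arithmetic: the block is padded so that an aligned user area can always be carved out after the two headers, and the LargeObjectHdr is written immediately before the returned pointer. A minimal sketch of that arithmetic, assuming the usual power-of-two alignUp helper and stand-in sizes (none of the constants below are the real tbbmalloc values):

#include <cassert>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Typical power-of-two alignUp helper (an assumption; tbbmalloc defines its own).
static inline uintptr_t alignUp(uintptr_t value, uintptr_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
    // Stand-ins for sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr), the alignment,
    // the cache step, and the requested size.
    const uintptr_t headersSize = 64;
    const uintptr_t alignment   = 128;
    const uintptr_t cacheStep   = 8 * 1024;
    const uintptr_t size        = 100 * 1024;

    // Same padding idea as mallocLargeObject: headers plus worst-case alignment slack,
    // rounded up to the cache step so the block lands in a cache bin.
    uintptr_t allocationSize = alignUp(size + headersSize + alignment, cacheStep);
    assert(allocationSize >= size);   // the wrap-around (overflow) check from the example

    // Pretend 'base' is where the backend placed the LargeMemoryBlock.
    uintptr_t base = 0x100000;
    uintptr_t alignedArea = alignUp(base + headersSize, alignment);

    // The LargeObjectHdr is written immediately before alignedArea
    // (see the freeLargeObject sketch after Example #2).
    printf("block at 0x%" PRIxPTR ", user pointer at 0x%" PRIxPTR "\n", base, alignedArea);
    return 0;
}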
Example #7
template<typename Props>
LargeMemoryBlock *LargeObjectCacheImpl<Props>::CacheBin::
    putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *head, BinBitMask *bitMask, int idx)
{
    int i, num, totalNum;
    size_t size = head->unalignedSize;
    LargeMemoryBlock *curr, *tail, *toRelease = NULL;
    uintptr_t currTime;

    // we did not keep prev pointers while assigning blocks to bins, so set them now
    head->prev = NULL;
    for (num=1, curr=head; curr->next; num++, curr=curr->next)
        curr->next->prev = curr;
    tail = curr;
    totalNum = num;

    {
        MallocMutex::scoped_lock scoped_cs(lock);
        usedSize -= num*size;
        // to keep ordering on the list, get the time under the list lock
        currTime = extMemPool->loc.getCurrTimeRange(num);

        for (curr=tail, i=0; curr; curr=curr->prev, i++) {
            curr->age = currTime+i;
            STAT_increment(getThreadId(), ThreadCommonCounters, cacheLargeObj);
        }

        if (!lastCleanedAge) {
            // The 1st object of this size was released.
            // Do not cache it, and remember when this happened
            // to take it into account on a cache miss.
            lastCleanedAge = tail->age;
            toRelease = tail;
            tail = tail->prev;
            if (tail)
                tail->next = NULL;
            else
                head = NULL;
            num--;
        }
        if (num) {
            // add [head;tail] list to cache
            tail->next = first;
            if (first)
                first->prev = tail;
            first = head;
            if (!last) {
                MALLOC_ASSERT(0 == oldest, ASSERT_TEXT);
                oldest = tail->age;
                last = tail;
            }

            cachedSize += num*size;
        }
/* It's acceptable if a bin is empty while the bit mask says non-empty,
   so the bit is set to true without the lock.
   It's not acceptable if a bin is non-empty while the bit mask says empty,
   so the bit is set to false under the lock. */

        // No objects in use and nothing in the bin, so mark the bin as empty
        if (!usedSize && !first)
            bitMask->set(idx, false);
    }
    extMemPool->loc.cleanupCacheIfNeededOnRange(totalNum, currTime);
    if (toRelease)
        toRelease->prev = toRelease->next = NULL;
    return toRelease;
}
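putList receives the blocks as a singly linked list and back-fills the prev pointers before splicing the list into the bin under the lock. A small self-contained sketch of that back-fill pass, using a hypothetical node type instead of LargeMemoryBlock:

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for LargeMemoryBlock: only the link fields matter here.
struct Node {
    Node *next = nullptr;
    Node *prev = nullptr;
};

// Walk a singly linked list and fill in the prev pointers, returning the tail
// and the number of nodes, just like the loop at the top of putList.
Node *linkPrevPointers(Node *head, int *count) {
    head->prev = nullptr;
    int num = 1;
    Node *curr = head;
    for (; curr->next; ++num, curr = curr->next)
        curr->next->prev = curr;
    *count = num;
    return curr;  // tail
}

int main() {
    Node a, b, c;
    a.next = &b; b.next = &c;      // singly linked chain a -> b -> c

    int num = 0;
    Node *tail = linkPrevPointers(&a, &num);

    assert(num == 3 && tail == &c);
    assert(c.prev == &b && b.prev == &a && a.prev == nullptr);
}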