// Return a list of blocks — all mapping to the same bin — to the cache.
// Blocks the bin refuses to keep are released to the backend.
void LargeObjectCacheImpl<Props>::putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *toCache)
{
    const int binIdx = sizeToIdx(toCache->unalignedSize);

    MALLOC_ITT_SYNC_RELEASING(bin+binIdx);
    LargeMemoryBlock *rejected =
        bin[binIdx].putList(extMemPool, toCache, &bitMask, binIdx);
    if (rejected)
        extMemPool->backend.returnLargeObject(rejected);
}
// Example #2
// Try to cache a single freed large block; returns true when the block
// was accepted by its bin, false when it does not fit any bin or the
// bin declined it.
bool LargeObjectCache::put(ExtMemoryPool *extMemPool, LargeMemoryBlock *largeBlock)
{
    const size_t binIdx = sizeToIdx(largeBlock->unalignedSize);
    // Blocks mapping past the last bin are never cached.
    if (binIdx >= numLargeBlockBins)
        return false;

    MALLOC_ITT_SYNC_RELEASING(bin+binIdx);
    if (!bin[binIdx].put(extMemPool, largeBlock))
        return false;
    STAT_increment(getThreadId(), ThreadCommonCounters, cacheLargeBlk);
    return true;
}
// Example #3
// Look up a cached block of exactly the requested size; returns NULL on
// a cache miss or when the size maps past the last bin.
LargeMemoryBlock *LargeObjectCache::get(ExtMemoryPool *extMemPool, size_t size)
{
    MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT );
    const size_t binIdx = sizeToIdx(size);
    if (binIdx >= numLargeBlockBins)
        return NULL;

    LargeMemoryBlock *cached = bin[binIdx].get(extMemPool, size);
    if (cached) {
        MALLOC_ITT_SYNC_ACQUIRED(bin+binIdx);
        STAT_increment(getThreadId(), ThreadCommonCounters, allocCachedLargeBlk);
    }
    return cached;
}
// Dispatch a list of released blocks to the appropriate caches.
// Blocks of maxHugeSize or more bypass caching and go straight to the
// backend; the rest are grouped into sub-lists by destination bin and
// forwarded to the large or huge cache one sub-list at a time.
void LargeObjectCache::putList(LargeMemoryBlock *list)
{
    LargeMemoryBlock *toProcess, *n;

    // Outer loop: take the head of the remaining list as the seed of a
    // same-bin sub-list, collect its bin-mates, then hand the sub-list off.
    for (LargeMemoryBlock *curr = list; curr; curr = toProcess) {
        LargeMemoryBlock *tail = curr;
        toProcess = curr->next;
        if (curr->unalignedSize >= maxHugeSize) {
            // Too big for any cache: return to the backend immediately.
            extMemPool->backend.returnLargeObject(curr);
            continue;
        }
        int currIdx = sizeToIdx(curr->unalignedSize);

        // Find all blocks fitting to same bin. Not use more efficient sorting
        // algorithm because list is short (commonly,
        // LocalLOC's HIGH_MARK-LOW_MARK, i.e. 24 items).
        for (LargeMemoryBlock *b = toProcess; b; b = n) {
            // Save the successor first: b's links are rewritten below.
            n = b->next;
            if (sizeToIdx(b->unalignedSize) == currIdx) {
                // Append b to the sub-list that starts at curr.
                tail->next = b;
                tail = b;
                if (toProcess == b)
                    // b was the head of the remaining list; just advance it.
                    // (The new head's stale prev pointer is never read.)
                    toProcess = toProcess->next;
                else {
                    // Unlink b from the middle of the remaining list.
                    b->prev->next = b->next;
                    if (b->next)
                        b->next->prev = b->prev;
                }
            }
        }
        // Terminate the gathered sub-list before forwarding it.
        tail->next = NULL;
        // Route by size class: below maxLargeSize goes to the large cache,
        // [maxLargeSize, maxHugeSize) to the huge cache.
        if (curr->unalignedSize < maxLargeSize)
            largeCache.putList(extMemPool, curr);
        else
            hugeCache.putList(extMemPool, curr);
    }
}
// Fetch a cached block of the requested size from its bin, updating the
// cleanup bitmask when the bin transitions to non-empty.
LargeMemoryBlock *LargeObjectCacheImpl<Props>::get(uintptr_t currTime, size_t size)
{
    MALLOC_ASSERT( size%Props::CacheStep==0, ASSERT_TEXT );
    const int binIdx = sizeToIdx(size);
    bool binTurnedNonEmpty = false;

    LargeMemoryBlock *found = bin[binIdx].get(size, currTime, &binTurnedNonEmpty);
    // Setting the bit outside the lock is acceptable: the bitmask only
    // drives cleanup, so a transiently stale value cannot break correctness.
    if (binTurnedNonEmpty)
        bitMask.set(binIdx, true);
    if (found) {
        MALLOC_ITT_SYNC_ACQUIRED(bin+binIdx);
        STAT_increment(getThreadId(), ThreadCommonCounters, allocCachedLargeObj);
    }
    return found;
}
// Undo the used-size accounting for an allocation of the given size
// (e.g. after a failed attempt), keeping the bitmask in sync.
void LargeObjectCacheImpl<Props>::rollbackCacheState(size_t size)
{
    const int binIdx = sizeToIdx(size);
    MALLOC_ASSERT(binIdx<numBins, ASSERT_TEXT);
    bin[binIdx].decrUsedSize(size, &bitMask, binIdx);
}