Example #1
CachingReaderChunkForOwner* CachingReader::allocateChunkExpireLRU(SINT chunkIndex) {
    CachingReaderChunkForOwner* pChunk = allocateChunk(chunkIndex);
    if (pChunk == nullptr) {
        if (m_lruCachingReaderChunk == nullptr) {
            qDebug() << "ERROR: No LRU chunk to free in allocateChunkExpireLRU.";
            return nullptr;
        }
        freeChunk(m_lruCachingReaderChunk);
        pChunk = allocateChunk(chunkIndex);
    }
    //qDebug() << "allocateChunkExpireLRU" << chunkIndex << pChunk;
    return pChunk;
}
Example #2
Chunk* CachingReader::allocateChunkExpireLRU() {
    Chunk* chunk = allocateChunk();
    if (chunk == NULL) {
        if (m_lruChunk == NULL) {
            qDebug() << "ERROR: No LRU chunk to free in allocateChunkExpireLRU.";
            return NULL;
        }
        //qDebug() << "Expiring LRU" << m_lruChunk << m_lruChunk->chunk_number;
        freeChunk(m_lruChunk);
        chunk = allocateChunk();
    }
    //qDebug() << "allocateChunkExpireLRU" << chunk;
    return chunk;
}
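Both CachingReader variants above lean on allocateChunk() handing out a chunk from a free list and freeChunk() returning one to it, with m_lruChunk / m_lruCachingReaderChunk pointing at the tail of an LRU list that names the eviction candidate. A minimal, self-contained sketch of that bookkeeping follows; the names and layout are hypothetical, not the actual Mixxx implementation.

#include <cstddef>
#include <vector>

struct Chunk {
    int chunk_number = -1;
    Chunk* prevLru = nullptr; // towards the most-recently-used end
    Chunk* nextLru = nullptr; // towards the least-recently-used end
};

class ChunkCache {
  public:
    explicit ChunkCache(std::size_t capacity) : m_chunks(capacity) {
        for (Chunk& chunk : m_chunks) {
            m_freeChunks.push_back(&chunk);
        }
    }

    // Returns nullptr when every chunk is in use; a caller like
    // allocateChunkExpireLRU() then frees lruChunk() and retries.
    Chunk* allocateChunk(int chunkNumber) {
        if (m_freeChunks.empty()) {
            return nullptr;
        }
        Chunk* pChunk = m_freeChunks.back();
        m_freeChunks.pop_back();
        pChunk->chunk_number = chunkNumber;
        linkMru(pChunk);
        return pChunk;
    }

    // Unlinks the chunk from the LRU list and puts it back on the free list.
    void freeChunk(Chunk* pChunk) {
        unlink(pChunk);
        pChunk->chunk_number = -1;
        m_freeChunks.push_back(pChunk);
    }

    Chunk* lruChunk() const { return m_lruChunk; }

  private:
    void linkMru(Chunk* pChunk) {
        pChunk->prevLru = nullptr;
        pChunk->nextLru = m_mruChunk;
        if (m_mruChunk) {
            m_mruChunk->prevLru = pChunk;
        }
        m_mruChunk = pChunk;
        if (!m_lruChunk) {
            m_lruChunk = pChunk;
        }
    }

    void unlink(Chunk* pChunk) {
        if (pChunk->prevLru) {
            pChunk->prevLru->nextLru = pChunk->nextLru;
        } else {
            m_mruChunk = pChunk->nextLru;
        }
        if (pChunk->nextLru) {
            pChunk->nextLru->prevLru = pChunk->prevLru;
        } else {
            m_lruChunk = pChunk->prevLru;
        }
        pChunk->prevLru = nullptr;
        pChunk->nextLru = nullptr;
    }

    std::vector<Chunk> m_chunks;      // fixed pool, never resized
    std::vector<Chunk*> m_freeChunks; // chunks not currently in use
    Chunk* m_mruChunk = nullptr;
    Chunk* m_lruChunk = nullptr;
};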
Example #3
template <typename T>
ObjectPool<T>::ObjectPool(int chunkSize) throw(std::invalid_argument, std::bad_alloc) : chunkSize(chunkSize)
{
    if (chunkSize <= 0)
    {
        throw std::invalid_argument("chunk size must be positive");
    }
    // create chunkSize objects to start
    allocateChunk();
}
Example #4
template <typename T>
T& ObjectPool<T>::acquireObject()
{
    if (freeList.empty()) {
        allocateChunk();
    }
    T* obj = freeList.front();
    freeList.pop();
    return (*obj);
}
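Examples #3 and #4 only show the constructor and acquireObject(); both depend on an allocateChunk() that grows the pool when the free list runs dry. A self-contained sketch of that member follows, assuming freeList is a std::queue<T*> and the pool keeps each chunk alive in a separate container; these details are assumptions, not necessarily the original implementation.

#include <memory>
#include <queue>
#include <stdexcept>
#include <vector>

template <typename T>
class ObjectPool
{
public:
    explicit ObjectPool(int chunkSize) : chunkSize(chunkSize)
    {
        if (chunkSize <= 0) {
            throw std::invalid_argument("chunk size must be positive");
        }
        allocateChunk();
    }

    T& acquireObject()
    {
        if (freeList.empty()) {
            allocateChunk();
        }
        T* obj = freeList.front();
        freeList.pop();
        return (*obj);
    }

private:
    // Allocate chunkSize default-constructed objects in one array, keep the
    // array alive in allChunks, and hand out pointers through the free list.
    void allocateChunk()
    {
        std::unique_ptr<T[]> chunk(new T[chunkSize]);
        for (int i = 0; i < chunkSize; ++i) {
            freeList.push(&chunk[i]);
        }
        allChunks.push_back(std::move(chunk));
    }

    std::queue<T*> freeList;                     // objects ready to hand out
    std::vector<std::unique_ptr<T[]>> allChunks; // owns the raw storage
    int chunkSize;
};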
Example #5
      void*
      malloc ()
      {
        if (freeList.empty ())
          {
            allocateChunk ();
          }

        FreeListNode *node = &freeList.front ();
        freeList.pop_front ();
        node->~slist_base_hook<> ();
        ++mallocCount;
        return reinterpret_cast<void*> (node);
      }
Example #6
// clear all symbols from the table.
void MLSymbolTable::clear()
{
	MLScopedLock lock(mLock);

	mSize = 0;
	mCapacity = 0;
	
	mSymbolsByID.clear();
	
#if USE_ALPHA_SORT	
	mAlphaOrderByID.clear();
	mSymbolsByAlphaOrder.clear();
#endif
	mHashTable.clear();
	mHashTable.resize(kHashTableSize);
	allocateChunk();
	addEntry("", 0);
}
Example #7
// add an entry to the table. The entry must not already exist in the table.
// this must be the only way of modifying the symbol table.
int MLSymbolTable::addEntry(const char * sym, int len)
{
	int newID = mSize;	
	
	if(mSize >= mCapacity)
	{
		allocateChunk();
	}
	
	mSymbolsByID[newID] = sym;

#if USE_ALPHA_SORT	
	// store symbol in set to get alphabetically sorted index of new entry.
	auto insertReturnVal = mSymbolsByAlphaOrder.insert(mSymbolsByID[newID]); 
	auto newEntryIter = insertReturnVal.first;
	auto beginIter = mSymbolsByAlphaOrder.begin();
	int newIndex = distance(beginIter, newEntryIter);
	
	// make new index list entry
	mAlphaOrderByID[newID] = newIndex;

	// insert into alphabetical order list
	for(int i=0; i<newID; ++i)
	{
		if (mAlphaOrderByID[i] >= newIndex)
		{
			mAlphaOrderByID[i]++;
		}
	}
#endif 
	
	int hash = KRhash(sym);
	mHashTable[hash].push_back(newID);	
	mSize++;
	return newID;
}
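addEntry() buckets each symbol by KRhash(sym) into mHashTable, which clear() sizes to kHashTableSize. A typical K&R-style string hash of the kind the name suggests is sketched below; the multiplier and the table size are assumptions for illustration, not madronalib's actual constants.

const int kHashTableSize = 1 << 12; // assumed table size

int KRhash(const char* sym)
{
	// Accumulate characters with a small multiplier, then reduce modulo the
	// table size so the result indexes mHashTable directly.
	unsigned int hash = 0;
	for (const char* p = sym; *p != '\0'; ++p)
	{
		hash = hash * 31 + static_cast<unsigned char>(*p);
	}
	return static_cast<int>(hash % kHashTableSize);
}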
Example #8
 FixedSizePool () :
     mallocCount (0), freeCount (0), chunks (), freeList ()
 {
   allocateChunk ();
 }
Example #9
ESFError ESFDiscardAllocator::allocate(void **block, ESFUWord size) {
    if (!block) {
        return ESF_NULL_POINTER;
    }

    if (1 > size) {
        return ESF_INVALID_ARGUMENT;
    }

    ESFError error = 0;

    //
    // If asked for a block of memory larger than the chunk size, bypass
    // the chunks and allocate memory directly from the source allocator,
    // but remember the allocated memory so it can be freed when this
    // allocator is destroyed.
    //

    if (size > _chunkSize) {
        Chunk *chunk = 0;

        error = allocateChunk(&chunk, size);

        if (ESF_SUCCESS != error) {
            return error;
        }

        chunk->_idx = chunk->_size;

        if (_head) {
            chunk->_next = _head->_next;
            _head->_next = chunk;
        } else {
            _head = chunk;
        }

        *block = chunk->_data;

        return ESF_SUCCESS;
    }

    if (!_head) {
        error = allocateChunk(&_head, _chunkSize);

        if (ESF_SUCCESS != error) {
            return error;
        }
    }

    ESF_ASSERT(_head);

    if (size > (_head->_idx > _head->_size ? 0 : _head->_size - _head->_idx)) {
        Chunk *oldHead = _head;

        error = allocateChunk(&_head, _chunkSize);

        if (ESF_SUCCESS != error) {
            return error;
        }

        _head->_next = oldHead;
    }

    *block = _head->_data + _head->_idx;

    // Always keep the next available block word-aligned
    _head->_idx += ESF_WORD_ALIGN(size);

    return ESF_SUCCESS;
}
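allocate() advances _head->_idx by ESF_WORD_ALIGN(size) so the next free position stays word-aligned. The rounding that implies can be illustrated as below; this is an assumption about the macro's behaviour for illustration, not its actual definition.

#include <cstddef>
#include <cstdio>

// Round size up to the next multiple of the machine word size (a power of two).
static std::size_t wordAlign(std::size_t size) {
    const std::size_t word = sizeof(void*);
    return (size + word - 1) & ~(word - 1);
}

int main() {
    // With 8-byte words: 1..8 round to 8, 9..16 round to 16, and so on.
    for (std::size_t size = 1; size <= 17; ++size) {
        std::printf("%zu -> %zu\n", size, wordAlign(size));
    }
    return 0;
}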