Example No. 1
void AgentService::evict_by_ratio(){
    uint64_t dirty_block_count = this->cct->lru_dirty->get_length();
    uint64_t clean_block_count = this->cct->lru_clean->get_length();
    uint64_t total_cached_block = dirty_block_count + clean_block_count;
    uint64_t total_block_count = cache_total_size / object_size;

    log_print( "AgentService::evict_by_ratio:  current cache ratio:%2.4f \n", ( 1.0*total_cached_block/total_block_count ) );
    if( (1.0*total_cached_block/total_block_count) < cache_ratio_max )
        return;

    boost::upgrade_lock<boost::shared_mutex> lock(this->cct->cachemap_access);
    boost::upgrade_to_unique_lock<boost::shared_mutex> uniqueLock(lock);

    if( (1.0*dirty_block_count/total_block_count) > cache_ratio_health ){
        //because these cached blocks are still in lru_dirty,
        //we need to flush them first before we can evict
        uint64_t need_to_flush_count = dirty_block_count - cache_ratio_health * total_block_count;
        char** c_entry_flush_list = new char* [need_to_flush_count+1]();
        this->cct->lru_dirty->get_keys( &c_entry_flush_list[0], need_to_flush_count, false );
        flush( (CacheEntry**)c_entry_flush_list );
        delete[] c_entry_flush_list;

    }

    uint64_t need_to_evict_count = total_cached_block - cache_ratio_health * total_block_count;
    char** c_entry_list = new char* [need_to_evict_count+1]();
    //memset( c_entry_list, 0, need_to_evict_count+1 );
    this->cct->lru_clean->get_keys( &c_entry_list[0], need_to_evict_count, false );
    evict( (CacheEntry**)c_entry_list );
    delete[] c_entry_list;

}
Example No. 2
/* Allocates an unused user frame.  If there are no free frames, evicts one. */
struct frame *frame_alloc() {
  struct frame *f = NULL;
  
  lock_acquire(&ft_lock);
  void *pg = palloc_get_page(PAL_USER);

  if(pg == NULL) {
  	/* If there are no frames left, evict a page. */
    f = get_frame_for_eviction();
    lock_release(&ft_lock);
    lock_acquire(&f->page->moving); /* TODO: this should never block; use a monitor instead to synchronize the evictor with calls to page_free(). */
    evict(f);
  } else {
  	/* Initialize new frame. */
    f = malloc(sizeof(struct frame));
    f->addr = pg;
    lock_init(&f->evicting);
    lock_acquire(&f->evicting);

    /* Add frame to frame table. */
    hash_insert(&frame_table, &f->elem);
    lock_release(&ft_lock);
  }
  return f;
}
Example No. 3
int smsa_put_cache_line( SMSA_DRUM_ID drm, SMSA_BLOCK_ID blk, unsigned char *buf ){
    int i;
    for( i = 0; i < num_cacheLines; i++ ) {
        /* Cache NULL? */
        if(cacheLines[i].line == NULL) {
            // Insert into cache
            logMessage(LOG_INFO_LEVEL, "Cache NULL - Writing to cache at: drum[%d]\tblock[%d]\n", drm, blk);
            cacheLines[i].drum = drm;
            cacheLines[i].block = blk;
            cacheLines[i].line = buf;
            // Set timestamp
            gettimeofday(&cacheLines[i].used, NULL);
            // Set lruIndex
            lruIndex = i;
            return 0;
        }
    }
    /* At this point the cache is full and we need to use the LRU cache */
    /* eviction policy to determine where to place the new cache entry  */

    // Eviction notice
    logMessage(LOG_INFO_LEVEL, "Cache full - evicting...", drm, blk);

    return evict( buf, drm, blk );
}
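The evict() this function falls back to is not shown in the example. Below is a minimal sketch of one way the LRU replacement could work, assuming the same cacheLines, num_cacheLines and lruIndex globals; the real driver would likely also write the old line back to its drum and free or reuse its buffer.

/* Hypothetical sketch of evict(): find the least recently used line (the
   oldest .used timestamp), overwrite it with the new entry, and refresh
   its timestamp and lruIndex. */
int evict( unsigned char *buf, SMSA_DRUM_ID drm, SMSA_BLOCK_ID blk ) {
    int i, victim = 0;
    for( i = 1; i < num_cacheLines; i++ ) {
        /* An older timestamp means less recently used. */
        if( cacheLines[i].used.tv_sec < cacheLines[victim].used.tv_sec ||
            (cacheLines[i].used.tv_sec == cacheLines[victim].used.tv_sec &&
             cacheLines[i].used.tv_usec < cacheLines[victim].used.tv_usec) )
            victim = i;
    }
    logMessage(LOG_INFO_LEVEL, "Evicting: drum[%d]\tblock[%d]\n",
               cacheLines[victim].drum, cacheLines[victim].block);
    cacheLines[victim].drum = drm;
    cacheLines[victim].block = blk;
    cacheLines[victim].line = buf;
    gettimeofday(&cacheLines[victim].used, NULL);
    lruIndex = victim;
    return 0;
}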
Example No. 4
CEntry*
cachelookup(Cache *c, char *name, int create)
{
	int h;
	CEntry *e;
	
	h = hash(name) % c->nhash;
	for(e=c->hash[h]; e; e=e->hash.next){
		if(strcmp(name, e->name) == 0){
			movetofront(c, e);
			return e;
		}
	}
	
	if(!create)
		return nil;
	
	if(c->nentry >= c->maxentry)
		e = evict(c);
	else{
		e = emalloc(c->sizeofentry);
		insertfront(c, e);
		c->nentry++;
	}
	e->name = estrdup(name);
	h = hash(name) % c->nhash;
	e->hash.next = c->hash[h];
	c->hash[h] = e;
	return e;	
}
Example No. 5
void MemoryCache::remove(Resource* resource)
{
    // The resource may have already been removed by someone other than our caller,
    // who needed a fresh copy for a reload.
    if (MemoryCacheEntry* entry = getEntryForResource(resource))
        evict(entry);
}
Example No. 6
void
EvictorBase::evictServants()
{
    //
    // If the evictor queue has grown larger than the limit,
    // look at the excess elements to see whether any of them
    // can be evicted.
    //
    EvictorQueue::reverse_iterator p = _queue.rbegin();
    int excessEntries = static_cast<int>(_map.size() - _size);

    for(int i = 0; i < excessEntries; ++i)
    {
        EvictorMap::iterator mapPos = *p;
        if(mapPos->second->useCount == 0)
        {
            evict(mapPos->second->servant, mapPos->second->userCookie); // Down-call
            p = EvictorQueue::reverse_iterator(_queue.erase(mapPos->second->queuePos));
            _map.erase(mapPos);
        }
        else
        {
            ++p;
        }
    }
}
Example No. 7
void
umain(int argc, char **argv)
{
	int block=0;
	cprintf("\nevict program called!!!");
	block = evict();
	cprintf("\nFS evicted block:%d\n",block);
}
Example No. 8
 void Pager::clear_cache()
 {
     //clear the list
     while(list_head->next != list_head){
         evict(list_head->next);
     }
     delete list_head;
 }
Example No. 9
void MemoryCache::setDisabled(bool disabled)
{
    m_disabled = disabled;
    if (!m_disabled)
        return;

    for (;;) {
        CachedResourceMap::iterator outerIterator = m_resources.begin();
        if (outerIterator == m_resources.end())
            break;
#if ENABLE(CACHE_PARTITIONING)
        CachedResourceItem::iterator innerIterator = outerIterator->value->begin();
        evict(innerIterator->value);
#else
        evict(outerIterator->value);
#endif
    }
}
Example No. 10
void MemoryCache::evictResources()
{
    for (;;) {
        ResourceMap::iterator i = m_resources.begin();
        if (i == m_resources.end())
            break;
        evict(i->value);
    }
}
Example No. 11
void MemoryCache::replace(Resource* newResource, Resource* oldResource)
{
    ASSERT(newResource->cacheIdentifier() == oldResource->cacheIdentifier());
    ResourceMap* resources = ensureResourceMap(oldResource->cacheIdentifier());
    if (MemoryCacheEntry* oldEntry = resources->get(oldResource->url()))
        evict(oldEntry);
    add(newResource);
    if (newResource->decodedSize() && newResource->hasClients())
        insertInLiveDecodedResourcesList(resources->get(newResource->url()));
}
Example No. 12
Resource* MemoryCache::resourceForURL(const KURL& resourceURL)
{
    ASSERT(WTF::isMainThread());
    KURL url = removeFragmentIdentifierIfNeeded(resourceURL);
    Resource* resource = m_resources.get(url);
    if (resource && !resource->makePurgeable(false)) {
        ASSERT(!resource->hasClients());
        evict(resource);
        return 0;
    }
    return resource;
}
Example No. 13
  // Insert key/value into the cache.
  void insert(const Key& key, const Value& value)
  {
    if (keys.size() == capacity) {
      evict();
    }

    // Get a "pointer" into the lru list for efficient update.
    typename list::iterator i = keys.insert(keys.end(), key);

    // Save key/value and "pointer" into lru list.
    values.insert(std::make_pair(key, std::make_pair(value, i)));
  }
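The evict() used by insert() is not part of this example. A minimal sketch of a matching implementation, assuming keys is a std::list<Key> that holds the LRU order with the oldest key at the front, and values maps each key to a (value, list iterator) pair:

  // Hypothetical sketch of the matching evict(): drop the least recently
  // used key (the front of the list) together with its cached value.
  void evict()
  {
    if (keys.empty()) {
      return;
    }

    const Key& key = keys.front();  // Oldest key in LRU order.
    values.erase(key);              // Remove the cached value and its "pointer".
    keys.pop_front();               // Remove the key from the LRU list.
  }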
Example No. 14
void MemoryCache::replace(Resource* newResource, Resource* oldResource)
{
    evict(oldResource);
    ASSERT(!m_resources.get(newResource->url()));
    m_resources.set(newResource->url(), newResource);
    newResource->setInCache(true);
    insertInLRUList(newResource);
    int delta = newResource->size();
    if (newResource->decodedSize() && newResource->hasClients())
        insertInLiveDecodedResourcesList(newResource);
    if (delta)
        adjustSize(newResource->hasClients(), delta);
}
Example No. 15
void MemoryCache::prune(Resource* justReleasedResource)
{
    TRACE_EVENT0("renderer", "MemoryCache::prune()");

    if (m_inPruneResources)
        return;
    if (m_liveSize + m_deadSize <= m_capacity && m_maxDeadCapacity && m_deadSize <= m_maxDeadCapacity) // Fast path.
        return;

    // To avoid burdening the current thread with repetitive pruning jobs,
    // pruning is postponed until the end of the current task. If it has
    // been more than m_maxPruneDeferralDelay since the last prune,
    // then we prune immediately.
    // If the current thread's run loop is not active, then pruning will happen
    // immediately only if it has been over m_maxPruneDeferralDelay
    // since the last prune.
    double currentTime = WTF::currentTime();
    if (m_prunePending) {
        if (currentTime - m_pruneTimeStamp >= m_maxPruneDeferralDelay) {
            pruneNow(currentTime, AutomaticPrune);
        }
    } else {
        if (currentTime - m_pruneTimeStamp >= m_maxPruneDeferralDelay) {
            pruneNow(currentTime, AutomaticPrune); // Delay exceeded, prune now.
        } else {
            // Defer.
            Platform::current()->currentThread()->addTaskObserver(this);
            m_prunePending = true;
        }
    }

    if (m_prunePending && m_deadSize > m_maxDeferredPruneDeadCapacity && justReleasedResource) {
        // The following eviction does not respect LRU order, but it can be done
        // immediately in constant time, as opposed to pruneDeadResources, which
        // we would rather defer because it is O(N), which would make tear-down of N
        // objects O(N^2) if we pruned immediately. This immediate eviction is a
        // safeguard against runaway memory consumption by dead resources
        // while a prune is pending.
        // Main resources in the cache are only substitute data that was
        // precached and should not be evicted.
        if (justReleasedResource->type() != Resource::MainResource) {
            if (MemoryCacheEntry* entry = getEntryForResource(justReleasedResource))
                evict(entry);
        }

        // As a last resort, prune immediately
        if (m_deadSize > m_maxDeferredPruneDeadCapacity)
            pruneNow(currentTime, AutomaticPrune);
    }
}
Example No. 16
        ///////////////////////////////////////////////////////////////////////
        /// \brief Change the maximum size this cache can grow to
        ///
        /// \param max_size    [in] The new maximum size this cache will be
        ///             allowed to grow to.
        ///
        void reserve(size_type max_size)
        {
            if(max_size > max_size_)
            {
                max_size_ = max_size;
                return;
            }

            max_size_ = max_size;
            while(current_size_ > max_size_)
            {
                evict();
            }
        }
Example No. 17
void IOSurfacePool::willAddSurface(IOSurface& surface, bool inUse)
{
    CachedSurfaceDetails& details = m_surfaceDetails.add(&surface, CachedSurfaceDetails()).iterator->value;
    details.resetLastUseTime();

    surface.releaseGraphicsContext();

    size_t surfaceBytes = surface.totalBytes();

    evict(surfaceBytes);

    m_bytesCached += surfaceBytes;
    if (inUse)
        m_inUseBytesCached += surfaceBytes;
}
Example No. 18
void MemoryCache::evictResources()
{
    while (true) {
        ResourceMapIndex::iterator resourceMapIter = m_resourceMaps.begin();
        if (resourceMapIter == m_resourceMaps.end())
            break;
        ResourceMap* resources = resourceMapIter->value.get();
        while (true) {
            ResourceMap::iterator resourceIter = resources->begin();
            if (resourceIter == resources->end())
                break;
            evict(resourceIter->value.get());
        }
        m_resourceMaps.remove(resourceMapIter);
    }
}
Example No. 19
        void insert_nonexist(key_type const & key, entry_type const & entry)
        {
            // insert ...
            storage_.push_front(entry_pair(key, entry));
            map_[key] = storage_.begin();
            ++current_size_;

            // update statistics
            statistics_.got_insertion();

            // Do we need to evict a cache entry?
            if(current_size_ > max_size_)
            {
                // evict an entry
                evict();
            }
        }
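The evict() invoked here is likewise not shown. A minimal sketch consistent with what insert_nonexist() exposes (storage_ is a list of (key, entry) pairs with the most recently used pair at the front, map_ maps keys to list iterators, and current_size_ counts entries); a real implementation would presumably also update statistics_:

        // Hypothetical sketch of the matching evict(): drop the least
        // recently used pair, i.e. the one at the back of storage_.
        void evict()
        {
            if(storage_.empty())
                return;

            map_.erase(storage_.back().first);  // Drop the key -> iterator mapping.
            storage_.pop_back();                // Drop the (key, entry) pair itself.
            --current_size_;
        }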
Example No. 20
Resource* MemoryCache::resourceForURL(const KURL& resourceURL, const String& cacheIdentifier)
{
    ASSERT(WTF::isMainThread());
    ResourceMap* resources = m_resourceMaps.get(cacheIdentifier);
    if (!resources)
        return nullptr;
    KURL url = removeFragmentIdentifierIfNeeded(resourceURL);
    MemoryCacheEntry* entry = resources->get(url);
    if (!entry)
        return nullptr;
    Resource* resource = entry->m_resource.get();
    if (resource && !resource->lock()) {
        ASSERT(!resource->hasClients());
        bool didEvict = evict(entry);
        ASSERT_UNUSED(didEvict, didEvict);
        return nullptr;
    }
    return resource;
}
Example No. 21
/* Frees all data associated with the frame F, including its user page. */
void frame_free(struct frame *f) {
  struct hash_elem *e;
  ASSERT(lock_held_by_current_thread(&f->evicting));

  /* Delete frame from frame table. */
  lock_acquire(&ft_lock);
  e = hash_delete(&frame_table, &f->elem);
  ASSERT(e != NULL);

  /* Evict page from frame. */
  if(f->page != NULL)
    evict(f);

  ASSERT(list_empty(&f->evicting.semaphore.waiters));

  /* Free resources. */
  palloc_free_page(f->addr);
  free(f);
  lock_release(&ft_lock);
}
Example No. 22
void MemoryCache::revalidationSucceeded(CachedResource* revalidatingResource, const ResourceResponse& response)
{
    CachedResource* resource = revalidatingResource->resourceToRevalidate();
    ASSERT(resource);
    ASSERT(!resource->inCache());
    ASSERT(resource->isLoaded());
    ASSERT(revalidatingResource->inCache());

    // Calling evict() can potentially delete revalidatingResource, which we use
    // below. This mustn't be the case since revalidation means it is loaded
    // and so canDelete() is false.
    ASSERT(!revalidatingResource->canDelete());

    evict(revalidatingResource);

    CachedResourceMap& resources = getSessionMap(resource->sessionID());
#if ENABLE(CACHE_PARTITIONING)
    ASSERT(!resources.get(resource->url()) || !resources.get(resource->url())->get(resource->cachePartition()));
    CachedResourceItem* originMap = resources.get(resource->url());
    if (!originMap) {
        originMap = new CachedResourceItem;
        resources.set(resource->url(), adoptPtr(originMap));
    }
    originMap->set(resource->cachePartition(), resource);
#else
    ASSERT(!resources.get(resource->url()));
    resources.set(resource->url(), resource);
#endif
    resource->setInCache(true);
    resource->updateResponseAfterRevalidation(response);
    insertInLRUList(resource);
    int delta = resource->size();
    if (resource->decodedSize() && resource->hasClients())
        insertInLiveDecodedResourcesList(resource);
    if (delta)
        adjustSize(resource->hasClients(), delta);
    
    revalidatingResource->switchClientsToRevalidatedResource();
    ASSERT(!revalidatingResource->m_deleted);
    // this deletes the revalidating resource
    revalidatingResource->clearResourceToRevalidate();
}
Example No. 23
void GlobalTrackCache::evictAndSave(
        GlobalTrackCacheEntryPointer cacheEntryPtr) {
    DEBUG_ASSERT(cacheEntryPtr);

    // We need to be sure this is always called from the main thread,
    // because we can only access the DB from it, and we must not lose
    // the lock until all changes are persistently stored in file and DB,
    // so that the track is not handed out again with old metadata.
    DEBUG_ASSERT(QApplication::instance()->thread() == QThread::currentThread());

    GlobalTrackCacheLocker cacheLocker;

    if (!cacheEntryPtr->getSavingWeakPtr().expired()) {
        // We have handed out (revived) this track again after our reference count
        // dropped to zero and before acquiring the lock at the beginning of this function.
        if (debugLogEnabled()) {
            kLogger.debug()
                    << "Skip to evict and save a revived or reallocated track"
                    << cacheEntryPtr->getPlainPtr();
        }
        return;
    }

    if (!evict(cacheEntryPtr->getPlainPtr())) {
        // A second deleter has already evicted the track from the cache after our
        // reference count dropped to zero and before acquiring the lock at the
        // beginning of this function.
        if (debugLogEnabled()) {
            kLogger.debug()
                    << "Skip to save an already evicted track"
                    << cacheEntryPtr->getPlainPtr();
        }
        return;
    }

    DEBUG_ASSERT(isEvicted(cacheEntryPtr->getPlainPtr()));
    m_pSaver->saveCachedTrack(cacheEntryPtr->getPlainPtr());

    // Here cacheEntryPtr goes out of scope and the cache entry is deleted,
    // including the owned track.
}
Example No. 24
void CondorFileBuffer::trim()
{
	CondorChunk *best_chunk,*i;
	off_t space_used;

	while(1) {
		space_used = 0;
		best_chunk = head;

		for( i=head; i; i=i->next ) {
			if( i->last_used < best_chunk->last_used ) {
				best_chunk = i;
			}
			space_used += i->size;
		}

		if( space_used <= buffer_size) return;

		evict( best_chunk );
	}
}
Example No. 25
CachedResource* MemoryCache::resourceForRequest(const ResourceRequest& request)
{
    ASSERT(WTF::isMainThread());
    KURL url = removeFragmentIdentifierIfNeeded(request.url());
#if ENABLE(CACHE_PARTITIONING)
    CachedResourceItem* item = m_resources.get(url);
    CachedResource* resource = 0;
    if (item)
        resource = item->get(request.cachePartition());
#else
    CachedResource* resource = m_resources.get(url);
#endif
    bool wasPurgeable = MemoryCache::shouldMakeResourcePurgeableOnEviction() && resource && resource->isPurgeable();
    if (resource && !resource->makePurgeable(false)) {
        ASSERT(!resource->hasClients());
        evict(resource);
        return 0;
    }
    // Add the size back since we had subtracted it when we marked the memory as purgeable.
    if (wasPurgeable)
        adjustSize(resource->hasClients(), resource->size());
    return resource;
}
Example No. 26
    Page * Pager::fetch_page(Address addr)
    {
        Page * res = index.get(addr);
        if (res){ //cache hit
            //remove from list
            res->prev->next = res->next;
            res->next->prev = res->prev;

            //insert to head
            res->next = list_head->next;
            res->prev = list_head;
            res->prev->next = res;
            res->next->prev = res;

            return res;
        }else{ //cache miss
            if (size == max_size){ //replace
                Address tmp = evict(list_head->prev);
                index.erase(tmp);
                --size;
            }

            ++size;
            res = read_from_disk(addr);

            //insert to head
            res->next = list_head->next;
            res->prev = list_head;
            res->prev->next = res;
            res->next->prev = res;

            index.put(addr, res);

            return res;
        }
    }
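Neither evict() nor read_from_disk() appears in this excerpt. A minimal sketch of an evict() consistent with how it is called above: it is handed the tail of the circular LRU list and returns the Address that the caller erases from the index. The addr member and the write_to_disk() hook are assumptions.

    // Hypothetical sketch: unlink the least recently used page (the list
    // tail), persist it if necessary, return its address, and release it.
    Address Pager::evict(Page * victim)
    {
        // Unlink from the circular doubly linked list.
        victim->prev->next = victim->next;
        victim->next->prev = victim->prev;

        Address addr = victim->addr;   // Assumed: the Page records its own Address.
        write_to_disk(victim);         // Assumed write-back hook for dirty pages.
        delete victim;
        return addr;
    }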
Example No. 27
AFK_VapourCell::~AFK_VapourCell()
{
    evict();
}
Example No. 28
void MemoryCache::pruneDeadResources()
{
    unsigned capacity = deadCapacity();
    if (!m_deadSize || (capacity && m_deadSize <= capacity))
        return;

    unsigned targetSize = static_cast<unsigned>(capacity * cTargetPrunePercentage); // Cut by a percentage to avoid immediately pruning again.

    int size = m_allResources.size();

    // See if we have any purged resources we can evict.
    for (int i = 0; i < size; i++) {
        Resource* current = m_allResources[i].m_tail;
        while (current) {
            Resource* prev = current->m_prevInAllResourcesList;
            if (current->wasPurged()) {
                ASSERT(!current->hasClients());
                ASSERT(!current->isPreloaded());
                evict(current);
            }
            current = prev;
        }
    }
    if (targetSize && m_deadSize <= targetSize)
        return;

    bool canShrinkLRULists = true;
    for (int i = size - 1; i >= 0; i--) {
        // Remove from the tail, since this is the least frequently accessed of the objects.
        Resource* current = m_allResources[i].m_tail;

        // First flush all the decoded data in this queue.
        while (current) {
            // Protect 'previous' so it can't get deleted during destroyDecodedData().
            ResourcePtr<Resource> previous = current->m_prevInAllResourcesList;
            ASSERT(!previous || previous->inCache());
            if (!current->hasClients() && !current->isPreloaded() && current->isLoaded()) {
                // Destroy our decoded data. This will remove us from
                // m_liveDecodedResources, and possibly move us to a different
                // LRU list in m_allResources.
                current->destroyDecodedData();

                if (targetSize && m_deadSize <= targetSize)
                    return;
            }
            // Decoded data may reference other resources. Stop iterating if 'previous' somehow got
            // kicked out of cache during destroyDecodedData().
            if (previous && !previous->inCache())
                break;
            current = previous.get();
        }

        // Now evict objects from this queue.
        current = m_allResources[i].m_tail;
        while (current) {
            ResourcePtr<Resource> previous = current->m_prevInAllResourcesList;
            ASSERT(!previous || previous->inCache());
            if (!current->hasClients() && !current->isPreloaded() && !current->isCacheValidator()) {
                evict(current);
                if (targetSize && m_deadSize <= targetSize)
                    return;
            }
            if (previous && !previous->inCache())
                break;
            current = previous.get();
        }

        // Shrink the vector back down so we don't waste time inspecting
        // empty LRU lists on future prunes.
        if (m_allResources[i].m_head)
            canShrinkLRULists = false;
        else if (canShrinkLRULists)
            m_allResources.resize(i);
    }
}
Example No. 29
void IndexTable::addHpackEntry(HpackEntry &entry) {
	dynamicTable_.emplace_front(entry);
	currentSize_ += entry.bytes();
	evict();
}
Example No. 30
void IndexTable::setHeaderTableSize(uint32_t tableSize) {
	headerTableSize_ = tableSize;
	evict();
}