Example #1
Type Type_fromJavaType(Oid typeId, const char* javaTypeName)
{
	CacheEntry ce = (CacheEntry)HashMap_getByString(s_obtainerByJavaName, javaTypeName);
	if(ce == 0)
	{
		int jtlen = strlen(javaTypeName) - 2;
		if(jtlen > 0 && strcmp("[]", javaTypeName + jtlen) == 0)
		{
			Type type;
			char* elemName = palloc(jtlen+1);
			memcpy(elemName, javaTypeName, jtlen);
			elemName[jtlen] = 0;
			type = Type_getArrayType(Type_fromJavaType(InvalidOid, elemName), typeId);
			pfree(elemName);
			return type;
		}
		ereport(ERROR, (
			errcode(ERRCODE_CANNOT_COERCE),
			errmsg("No java type mapping installed for \"%s\"", javaTypeName)));
	}

	return ce->type == 0
		? ce->obtainer(typeId == InvalidOid ? ce->typeId : typeId)
		: ce->type;
}
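For context, here is a standalone C++ sketch of the same idea: strip a trailing "[]", resolve the element type recursively, and wrap the result as an array type. The registry and names below are illustrative assumptions, not PL/Java's internals.

#include <stdexcept>
#include <string>
#include <unordered_map>

struct MappedType { std::string pgName; bool isArray = false; };

// Illustrative registry standing in for s_obtainerByJavaName.
static std::unordered_map<std::string, MappedType> g_registry = {
    {"java.lang.String", MappedType{"text"}},
    {"int", MappedType{"int4"}},
};

MappedType fromJavaType(const std::string& javaName)
{
    auto it = g_registry.find(javaName);
    if (it != g_registry.end())
        return it->second;                                   // direct hit
    if (javaName.size() > 2 && javaName.compare(javaName.size() - 2, 2, "[]") == 0) {
        // "java.lang.String[]" -> resolve "java.lang.String", then wrap it as an array.
        MappedType elem = fromJavaType(javaName.substr(0, javaName.size() - 2));
        return MappedType{elem.pgName + "[]", true};
    }
    throw std::runtime_error("No java type mapping installed for \"" + javaName + "\"");
}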
Example #2
void ImageDecodingStore::unlockCache(const ImageFrameGenerator* generator, const ScaledImageFragment* cachedImage)
{
    Vector<OwnPtr<CacheEntry> > cacheEntriesToDelete;
    {
        MutexLocker lock(m_mutex);
        cachedImage->bitmap().unlockPixels();
        ImageCacheMap::iterator iter = m_imageCacheMap.find(ImageCacheEntry::makeCacheKey(generator, cachedImage->scaledSize(), cachedImage->index(), cachedImage->generation()));
        ASSERT_WITH_SECURITY_IMPLICATION(iter != m_imageCacheMap.end());

        CacheEntry* cacheEntry = iter->value.get();
        cacheEntry->decrementUseCount();

        // Put the entry to the end of list.
        m_orderedCacheList.remove(cacheEntry);
        m_orderedCacheList.append(cacheEntry);

        // FIXME: This code is temporary such that in the new Skia
        // discardable memory path we do not cache images.
        // Once the transition is complete the logic to handle
        // image caching should be removed entirely.
        if (!s_imageCachingEnabled && !cacheEntry->useCount()) {
            removeFromCacheInternal(cacheEntry, &cacheEntriesToDelete);
            removeFromCacheListInternal(cacheEntriesToDelete);
        }
    }
}
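The pattern above, decrement a use count and move the touched entry to the tail of an LRU list under a mutex, can be shown standalone. The container choice and names below are illustrative, not Blink's API.

#include <list>
#include <mutex>
#include <unordered_map>
#include <utility>

struct Entry { int useCount = 0; };

class LruMap {
public:
    // Release one user of `key` and mark the entry most recently used.
    void unlock(int key) {
        std::lock_guard<std::mutex> lock(m_mutex);
        auto it = m_index.find(key);
        if (it == m_index.end())
            return;
        --it->second->second.useCount;
        // splice() relinks the node in O(1); stored iterators stay valid.
        m_list.splice(m_list.end(), m_list, it->second);
    }
private:
    using Node = std::pair<int, Entry>;
    std::mutex m_mutex;
    std::list<Node> m_list;
    std::unordered_map<int, std::list<Node>::iterator> m_index;
};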
Example #3
	void MemcachedCache::setValue(const QString& key, const CacheEntry& entry)
	{
		QWriteLocker lock(&m_lock);

		// Binary key
		const QByteArray rawKey(fullKey(key));

		// Binary data
		const quint64 dateTime(qToLittleEndian(static_cast<quint64>(entry.timeStamp().toTime_t())));
		QByteArray rawData(reinterpret_cast<const char*>(&dateTime), sizeof(quint64));
		rawData.append(entry.data());

		// Store in memcached
		memcached_return rt;
		rt = memcached_set(
			m_memcached,
			rawKey.constData(),
			rawKey.length(),
			rawData.constData(),
			rawData.length(),
			0, // expire
			0 // flags
		);
		if(rt != MEMCACHED_SUCCESS)
		{
			qFatal("Memcached error: %s", memcached_strerror(m_memcached, rt));
		}
	}
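A minimal sketch of the value layout used above, an 8-byte little-endian timestamp followed by the raw payload, without the Qt and libmemcached dependencies (std::string stands in for QByteArray):

#include <cstddef>
#include <cstdint>
#include <string>

std::string packValue(std::uint64_t timestamp, const std::string& payload)
{
    std::string raw;
    raw.reserve(sizeof(timestamp) + payload.size());
    for (std::size_t i = 0; i < sizeof(timestamp); ++i)
        raw.push_back(static_cast<char>((timestamp >> (8 * i)) & 0xff)); // little-endian, like qToLittleEndian
    raw.append(payload);
    return raw;
}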
Example #4
Color
PackedSurface::Reader::get_pixel(int x, int y) const
{
	if (!is_opened())
		return Color();

	if (x < 0)
		x = 0;
	if (x >= surface->width)
		x = surface->width-1;
	if (y < 0)
		y = 0;
	if (y >= surface->height)
		y = surface->height-1;

	if (cache)
	{
		int chunk_index = x/ChunkSize + y/ChunkSize*surface->chunks_width;
		x %= ChunkSize;
		y %= ChunkSize;
		CacheEntry *entry = chunks[chunk_index];
		if (!entry)
		{
			const void *data;
			int size;
			bool compressed;
			surface->get_compressed_chunk(chunk_index, data, size, compressed);
			if (!compressed)
				return surface->get_pixel(&((const char*)data)[x*surface->pixel_size + y*surface->chunk_row_size]);

			entry = last;
			if (entry->chunk_index >= 0)
				chunks[entry->chunk_index] = NULL;
			entry->chunk_index = chunk_index;
			chunks[chunk_index] = entry;
			zstreambuf::unpack(entry->data(), surface->chunk_size, data, size);
		}
		if (first != entry)
		{
			entry->prev->next = entry->next;
			(entry->next ? entry->next->prev : last) = entry->prev;

			first->prev = entry;
			entry->prev = NULL;
			entry->next = first;
			first = entry;
		}
		return surface->get_pixel(entry->data(x*surface->pixel_size + y*surface->chunk_row_size));
	}
	else
	if (surface->pixel_size)
	{
		return surface->get_pixel(&surface->data[x*surface->pixel_size + y*surface->row_size]);
	}
	return surface->constant;
}
Example #5
const ScaledImageFragment* ImageDecodingStore::overwriteAndLockCache(const ImageFrameGenerator* generator, const ScaledImageFragment* cachedImage, PassOwnPtr<ScaledImageFragment> newImage)
{
    OwnPtr<ImageDecoder> trash;
    const ScaledImageFragment* newCachedImage = 0;
    {
        MutexLocker lock(m_mutex);
        cachedImage->bitmap().unlockPixels();
        CacheMap::iterator iter = m_cacheMap.find(std::make_pair(generator, cachedImage->scaledSize()));
        ASSERT(iter != m_cacheMap.end());

        CacheEntry* cacheEntry = iter->value.get();
        ASSERT(cacheEntry->useCount() == 1);
        ASSERT(!cacheEntry->cachedImage()->isComplete());

        bool isNewImageDiscardable = DiscardablePixelRef::isDiscardable(newImage->bitmap().pixelRef());
        if (cacheEntry->isDiscardable() && !isNewImageDiscardable)
            incrementMemoryUsage(cacheEntry->memoryUsageInBytes());
        else if (!cacheEntry->isDiscardable() && isNewImageDiscardable)
            decrementMemoryUsage(cacheEntry->memoryUsageInBytes());
        trash = cacheEntry->overwriteCachedImage(newImage);
        newCachedImage = cacheEntry->cachedImage();
        // Lock the underlying SkBitmap to prevent it from being purged.
        newCachedImage->bitmap().lockPixels();
    }

    return newCachedImage;
}
Example #6
            void runTask()
            {
                CacheEntry *cached = new CacheEntry(_index, *_sink.d->font, *_sink.d,
                                                    *_sink.d->entryAtlas);
                cached->wrap(_styledText, _sink._width);

                //usleep(75000); // TODO -- remove this testing aid

                DENG2_GUARD_FOR(_sink._wrappedEntries, G);
                _sink._wrappedEntries << cached;
            }
Example #7
void ImageDecodingStore::unlockDecoder(const ImageFrameGenerator* generator, const ImageDecoder* decoder)
{
    MutexLocker lock(m_mutex);
    DecoderCacheMap::iterator iter = m_decoderCacheMap.find(DecoderCacheEntry::makeCacheKey(generator, decoder));
    ASSERT_WITH_SECURITY_IMPLICATION(iter != m_decoderCacheMap.end());

    CacheEntry* cacheEntry = iter->value.get();
    cacheEntry->decrementUseCount();

    // Put the entry to the end of list.
    m_orderedCacheList.remove(cacheEntry);
    m_orderedCacheList.append(cacheEntry);
}
Example #8
void ImageDecodingStore::unlockCache(const ImageFrameGenerator* generator, const ScaledImageFragment* cachedImage)
{
    MutexLocker lock(m_mutex);
    cachedImage->bitmap().unlockPixels();
    CacheMap::iterator iter = m_cacheMap.find(std::make_pair(generator, cachedImage->scaledSize()));
    ASSERT(iter != m_cacheMap.end());

    CacheEntry* cacheEntry = iter->value.get();
    cacheEntry->decrementUseCount();

    // Put the entry to the end of list.
    m_orderedCacheList.remove(cacheEntry);
    m_orderedCacheList.append(cacheEntry);
}
Example #9
  void GlobalCache::prune_unmarked(unsigned int mark) {
    for(size_t i = 0; i < CPU_CACHE_SIZE; i++) {
      CacheEntry* entry = &entries[i];
      Object* klass = entry->klass;
      if(!klass) continue;

      Object* mod = entry->module;
      Object* exec = entry->method;

      if(!klass->marked_p(mark) || !mod->marked_p(mark) || !exec->marked_p(mark)) {
        entry_names[i] = NULL;
        entry->clear();
      }
    }
  }
Example #10
PxOsdTopologyRefinerSharedPtr
PxOsdRefinerCache::GetOrCreateRefiner(
    PxOsdMeshTopology topology,
    bool bilinearStencils,
    int level,
    StencilTableSharedPtr *cvStencils,
    PatchTableSharedPtr *patchTable)
{
    TRACE_FUNCTION();    
    std::lock_guard<std::mutex> lock(_mutex);    

    if ((topology.GetScheme() != PxOsdOpenSubdivTokens->catmullClark
                and topology.GetScheme() != PxOsdOpenSubdivTokens->catmark)
          and not bilinearStencils) {
        // XXX: This refiner will be adaptively refined, so we need to ensure
        // we're using catmull-clark subdivision scheme, since that's the only
        // option currently. Once OpenSubdiv supports adaptive loop subdivision,
        // we should remove this hack.
        topology.SetScheme(PxOsdOpenSubdivTokens->catmullClark);
    }

    // This is quick, just compute the hash
    CacheEntry entry = CacheEntry(topology, bilinearStencils, level);

    _CacheEntrySet::iterator iter = _cachedEntries.find(entry);

    if (iter != _cachedEntries.end()) {
        // Cache hit, return the refiner already constructed
        if (cvStencils)
            *cvStencils = iter->cvStencils;
        if (patchTable)
            *patchTable = iter->patchTable;
        return iter->refiner;
    }

    // Cache miss, do the expensive work of creating a new refiner
    entry.CreateRefiner();

    if (cvStencils)    
        *cvStencils = entry.cvStencils;
    if (patchTable)
        *patchTable = entry.patchTable;
    PxOsdTopologyRefinerSharedPtr refiner = entry.refiner;

    _cachedEntries.insert(entry);    

    return refiner;
}
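The same lock / look up / create-on-miss structure can be sketched generically. The key type and factory below are illustrative, not the PxOsd types.

#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

struct Refiner { /* expensive-to-build object */ };

class RefinerCache {
public:
    std::shared_ptr<Refiner> getOrCreate(const std::string& key) {
        std::lock_guard<std::mutex> lock(m_mutex);
        auto it = m_entries.find(key);
        if (it != m_entries.end())
            return it->second;                       // cache hit: reuse the shared object
        auto refiner = std::make_shared<Refiner>();  // cache miss: do the expensive work
        m_entries.emplace(key, refiner);
        return refiner;
    }
private:
    std::mutex m_mutex;
    std::unordered_map<std::string, std::shared_ptr<Refiner>> m_entries;
};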
Example #11
void
nsPreflightCache::RemoveEntries(nsIURI* aURI, nsIPrincipal* aPrincipal)
{
    CacheEntry* entry;
    nsCString key;
    if (GetCacheKey(aURI, aPrincipal, true, key) &&
            mTable.Get(key, &entry)) {
        entry->removeFrom(mList);
        mTable.Remove(key);
    }

    if (GetCacheKey(aURI, aPrincipal, false, key) &&
            mTable.Get(key, &entry)) {
        entry->removeFrom(mList);
        mTable.Remove(key);
    }
}
Example #12
void QueryService::cachedCall( const QString &method,
                               const QValueList<QVariant> &args,
                               const char *slot )
{
	kdDebug() << "Calling " << method << endl;

	const QString cacheKey = Cache::getCacheKey( m_xmlrpcServer->url().url(),
	                                             method, args );

	CacheEntry *cacheEntry = Cache::self().find( cacheKey );
	if ( cacheEntry != 0 && cacheEntry->isValid() ) {
		kdDebug() << "Using cached result." << endl;
		SlotCaller::call( this, slot, cacheEntry->result() );
	} else {
		kdDebug() << "No cached result found, querying server." << endl;
		m_xmlrpcServer->call( method, args, this, slot );
	}
}
Example #13
        void runTask()
        {
            while(_next >= 0 && _cancelLevel == d->cancelRewrap)
            {
                CacheEntry *e = d->cache[_next--];

                // Rewrap and update total height.
                int delta = e->rewrap(_width);
                d->self.modifyContentHeight(delta);

                /// @todo Adjust the scroll position if this entry is below it
                /// (would cause a visible scroll to occur).

                if(_next < d->visibleRange.end)
                {
                    // Above the visible range, no need to rush.
                    TimeDelta(.001).sleep();
                }
            }
        }
Example #14
QT_BEGIN_NAMESPACE

/*******************************************************************************
 *
 * class QVectorPath
 *
 */
QVectorPath::~QVectorPath()
{
    if (m_hints & ShouldUseCacheHint) {
        CacheEntry *e = m_cache;
        while (e) {
            if (e->data)
                e->cleanup(e->engine, e->data);
            CacheEntry *n = e->next;
            delete e;
            e = n;
        }
    }
}
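A standalone sketch of that destructor pattern: walk a singly linked chain of cache nodes, invoke each node's cleanup callback, then delete the node. The Node layout here is an assumption for illustration, not Qt's CacheEntry.

struct Node {
    void (*cleanup)(void* data) = nullptr;
    void* data = nullptr;
    Node* next = nullptr;
};

void destroyChain(Node* head)
{
    while (head) {
        Node* next = head->next;   // save the link before deleting the node
        if (head->data && head->cleanup)
            head->cleanup(head->data);
        delete head;
        head = next;
    }
}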
Example #15
  void GlobalCache::prune_young() {
    for(size_t i = 0; i < CPU_CACHE_SIZE; i++) {
      CacheEntry* entry = &entries[i];
      bool clear = false;

      Object* klass = entry->klass;
      if(!klass) continue;

      if(klass->young_object_p()) {
        if(klass->forwarded_p()) {
          Module* fwd = force_as<Module>(klass->forward());
          entry->klass = fwd;
        } else {
          clear = true;
        }
      }

      Object* mod = entry->module;
      if(mod->young_object_p()) {
        if(mod->forwarded_p()) {
          entry->module = force_as<Module>(mod->forward());
        } else {
          clear = true;
        }
      }

      Object* exec = entry->method;
      if(exec->young_object_p()) {
        if(exec->forwarded_p()) {
          entry->method = force_as<Executable>(exec->forward());
        } else {
          clear = true;
        }
      }

      if(clear) {
        entry_names[i] = NULL;
        entry->clear();
      }
    }
  }
Example #16
void ImageDecodingStore::removeDecoder(const ImageFrameGenerator* generator, const ImageDecoder* decoder)
{
    Vector<OwnPtr<CacheEntry> > cacheEntriesToDelete;
    {
        MutexLocker lock(m_mutex);
        DecoderCacheMap::iterator iter = m_decoderCacheMap.find(DecoderCacheEntry::makeCacheKey(generator, decoder));
        ASSERT_WITH_SECURITY_IMPLICATION(iter != m_decoderCacheMap.end());

        CacheEntry* cacheEntry = iter->value.get();
        ASSERT(cacheEntry->useCount());
        cacheEntry->decrementUseCount();

        // Delete only one decoder cache entry. Ownership of the cache entry
        // is transfered to cacheEntriesToDelete such that object can be deleted
        // outside of the lock.
        removeFromCacheInternal(cacheEntry, &cacheEntriesToDelete);

        // Remove from LRU list.
        removeFromCacheListInternal(cacheEntriesToDelete);
    }
}
Example #17
void Client::continueClient(pollfd * ufds) {
	short revent = ufds->revents;
	CacheEntry *cache = Cache::getCache()->get(url);

	if( (revent & (POLLIN | POLLOUT)) == 0) {
		client->closeSocket();
		return ;
	}
	if(revent & POLLOUT) {
		if(errorState != OK) {
			sendError();
			client->closeSocket();
			return;
		} else if(cacheState == CACHE_READ && cache != NULL) {
			client->sendData(cache->getData(), cache->getLength());
			if(cache->getLength() == client->getWritten() && cache->isFinished()) {
				client->closeSocket();
			}
		} else if(remote != NULL && remote->getAvaliable() > 0) {
			client->sendData(remote->getBuffer(), remote->getAvaliable());
			client->resetWritten();
			remote->resetAvaliable();
		}
	}
	if(revent & POLLIN) {
		client->recvData();
		if(strstr(client->getBuffer(), "GET http://www.linux.org.ru/news/hardware/8497632")) {
			int i = 5;
		}
		if(cacheState == CACHE_NO_SET && remote == NULL && url.empty() && errorState == OK) {
			connectToRemote(client->getBuffer());
			if(remote != NULL) {
				//client->catBuf();
			}
		}
	}
}
Example #18
QT_BEGIN_NAMESPACE

#if !defined(QT_MAX_CACHED_GLYPH_SIZE)
#  define QT_MAX_CACHED_GLYPH_SIZE 64
#endif

/*******************************************************************************
 *
 * class QVectorPath
 *
 */
QVectorPath::~QVectorPath()
{
    if (m_hints & ShouldUseCacheHint) {
        CacheEntry *e = m_cache;
        while (e) {
            if (e->data)
                e->cleanup(e->engine, e->data);
            CacheEntry *n = e->next;
            delete e;
            e = n;
        }
    }
}
Example #19
inline void CachePolicy::registerCacheAccess( Directory& dir, uint64_t tag, size_t size, bool input, bool output )
{
   bool didCopyIn = false;
   CacheEntry *ce;
   ce = _cache.getEntry( tag );
   unsigned int version=0;
   if ( ce != NULL ) version = ce->getVersion()+1;
   DirectoryEntry *de = dir.getEntry( tag, version );

   if ( de == NULL ) { // Memory access not registered in the directory
      bool inserted;
      DirectoryEntry d = DirectoryEntry( tag, 0, ( output ? &_cache : NULL ), dir.getCacheMapSize() );
      de = &(dir.insert( tag, d, inserted ));
      if (!inserted) {
         if ( output ) {
            de->setOwner(&_cache);
            de->setInvalidated(false);
            ce->setFlushTo( &dir );
         }
      }

      CacheEntry c =  CacheEntry( NULL, size, tag, 0, output, input );
      ce = &(_cache.insert( tag, c, inserted ));
      if (inserted) { // allocate it
         ce->setAddress( _cache.allocate( dir, size , tag) );
         ce->setAllocSize( size );
         if (input) {
            CopyDescriptor cd = CopyDescriptor(tag);
            if ( _cache.copyDataToCache( cd, size ) ) {
               ce->setCopying(false);
            }
         }
      } else {        // wait for address
         NANOS_INSTRUMENT( sys.getInstrumentation()->raiseOpenBurstEvent ( sys.getInstrumentation()->getInstrumentationDictionary()->getEventKey( "cache-wait" ), NANOS_CACHE_EVENT_REGISTER_CACHE_ACCESS_94 ); )
         while ( ce->getAddress() == NULL ) {}
         NANOS_INSTRUMENT( sys.getInstrumentation()->raiseCloseBurstEvent ( sys.getInstrumentation()->getInstrumentationDictionary()->getEventKey( "cache-wait" ), 0 ); )
      }
Example #20
bool ImageDecodingStore::lockCache(const ImageFrameGenerator* generator, const SkISize& scaledSize, CacheCondition condition, const ScaledImageFragment** cachedImage, ImageDecoder** decoder)
{
    ASSERT(cachedImage);

    CacheEntry* cacheEntry = 0;
    Vector<OwnPtr<CacheEntry> > cacheEntriesToDelete;
    {
        MutexLocker lock(m_mutex);
        CacheMap::iterator iter = m_cacheMap.find(std::make_pair(generator, scaledSize));
        if (iter == m_cacheMap.end())
            return false;
        cacheEntry = iter->value.get();
        ScaledImageFragment* image = cacheEntry->cachedImage();
        if (condition == CacheMustBeComplete && !image->isComplete())
            return false;

        // Incomplete cache entry cannot be used more than once.
        ASSERT(image->isComplete() || !cacheEntry->useCount());

        image->bitmap().lockPixels();
        if (image->bitmap().getPixels()) {
            // Increment use count such that it doesn't get evicted.
            cacheEntry->incrementUseCount();

            // Complete cache entry doesn't have a decoder.
            ASSERT(!image->isComplete() || !cacheEntry->cachedDecoder());

            if (decoder)
                *decoder = cacheEntry->cachedDecoder();
            *cachedImage = image;
        } else {
            image->bitmap().unlockPixels();
            removeFromCacheInternal(cacheEntry, &cacheEntriesToDelete);
            removeFromCacheListInternal(cacheEntriesToDelete);
            return false;
        }
    }

    return true;
}
Example #21
/*
 * Cache page extraction
 */
Page* getNextPage()
{
    CacheEntry *entry = NULL;
    unsigned long nr=0;
    bool notUnregister = false;
    Page *page;

    // Get the next page number
    switch (_policy) {
        case EveryPagesIncreasing:
            nr = _lastPageRequested + 1;
            break;
        case EvenDecreasing:
            if (_lastPageRequested > 2)
                nr = _lastPageRequested - 2;
            else {
                nr = 1;
                setCachePolicy(OddIncreasing);
            }
            break;
        case OddIncreasing:
            if (!_lastPageRequested)
                nr = 1;
            else
                nr = _lastPageRequested + 2;
            break;
    }

    DEBUGMSG(_("Next requested page : %lu (# pages into memory=%lu/%u)"), nr, 
        _pagesInMemory, CACHESIZE);

    // Wait for the page
    while (nr && (!_numberOfPages || _numberOfPages >= nr)) {
        {
            _pageTableLock.lock();
            if (_maxPagesInTable >= nr && _pages[nr - 1] && 
                !_pages[nr - 1]->isSwapped()) {
                entry = _pages[nr - 1];
                _pages[nr - 1] = NULL;
                if (!entry->previous() && !entry->next() && entry != _inMemory)
                    notUnregister = true;
                if (entry->previous())
                    entry->previous()->setNext(entry->next());
                if (entry->next())
                    entry->next()->setPrevious(entry->previous());
                if (entry == _inMemory)
                    _inMemory = entry->next();
                if (entry == _inMemoryLast)
                    _inMemoryLast = NULL;
                _pageTableLock.unlock();
                break;
            } else if (_maxPagesInTable >= nr && _pages[nr - 1] && 
                _pages[nr - 1]->isSwapped())
                _work++;
            _pageRequested = nr;
            _pageTableLock.unlock();
        }
        _pageAvailable--;
    };

    // Extract the page instance
    if (!entry)
        return NULL;
    _pagesInTable--;
    _lastPageRequested = nr;
    page = entry->page();
    delete entry;

    // Preload a new page
    if (!notUnregister)
        _pagesInMemory--;
    _work++;

    return page;
}
Example #22
static void* _cacheControllerThread(void *_exitVar)
{
    bool *needToExit = (bool *)_exitVar;
    bool whatToDo = true;

    DEBUGMSG(_("Cache controller thread loaded and is waiting for a job"));
    while (!(*needToExit)) {
        bool preloadPage = false;

        // Waiting for a job
        _work--;

#ifdef DUMP_CACHE
        if (_pagesInMemory) {
            CacheEntry *tmp = _inMemory;

            fprintf(stderr, _("DEBUG: Cache dump: "));
            for (unsigned int i=0; i < _pagesInMemory && tmp; i++) {
                fprintf(stderr, "%lu ", tmp->page()->pageNr());
                tmp = tmp->next();
            }
            fprintf(stderr, "\n");
        } else
            fprintf(stderr, _("DEBUG: Cache empty\n"));
#endif /* DUMP_CACHE */

        // Does the thread needs to exit?
        if (*needToExit)
            break;


        /*
         * Check what action to do
         */
        // Nothing?
        if (!_waitingList && (_pagesInMemory == CACHESIZE || 
            _pagesInMemory == _pagesInTable))
            continue;
        // new page to append and pages to preload?
        // Choose one action of them to do (and inverse the action to do at 
        // each loop)
        if (_waitingList && !(_pagesInMemory == CACHESIZE || 
            _pagesInMemory == _pagesInTable)) {
            preloadPage = whatToDo;
            whatToDo = !whatToDo;
        // One of the two thing to do
        } else
            preloadPage = (_waitingList == NULL);

        /*
         * Preload a page
         */
        if (preloadPage) {
            __manageMemoryCache(NULL);

        /*
         * Store a page
         */
        } else {
            CacheEntry *entry;

            // Get the cache entry to store
            {
                _waitingListLock.lock();
                entry = _waitingList;
                _waitingList = entry->next();
                if (_lastWaitingList == entry)
                    _lastWaitingList = NULL;
                _waitingListLock.unlock();
            }

            // Store the entry in the page table
            {
                _pageTableLock.lock();

                // Resize the page table if needed
                while (entry->page()->pageNr() > _maxPagesInTable) {
                    if (!_maxPagesInTable) {
                        _maxPagesInTable = CACHESIZE;
                        _pages = new CacheEntry*[_maxPagesInTable];
                        memset(_pages, 0, _maxPagesInTable * 
                            sizeof(CacheEntry*));
                    } else {
                        CacheEntry** tmp = new CacheEntry*[_maxPagesInTable*10];
                        memcpy(tmp, _pages, _maxPagesInTable *
                            sizeof(CacheEntry*));
                        memset(tmp + _maxPagesInTable, 0, _maxPagesInTable * 9 *
                            sizeof(CacheEntry*));
                        delete[] _pages;
                        _pages = tmp;
                        _maxPagesInTable *= 10;
                    }
                }

                // Store the page in the table
                _pages[entry->page()->pageNr() - 1] = entry;
                _pageTableLock.unlock();
            }
            _pagesInTable++;

            // Does the main thread needs this page?
            if (_pageRequested == entry->page()->pageNr()) {
                _pageTableLock.lock();
                entry->setNext(NULL);
                entry->setPrevious(NULL);
                _pageAvailable++;
                _pageTableLock.unlock();

            // So check whether the page can be kept in memory or have to
            // be swapped on the disk
            } else 
                __manageMemoryCache(entry);
        }
    }

    DEBUGMSG(_("Cache controller unloaded. See ya"));
    return NULL;
}
Example #23
llvm::Value *LocalTypeDataCache::tryGet(IRGenFunction &IGF, Key key,
                                        bool allowAbstract) {
  auto it = Map.find(key);
  if (it == Map.end()) return nullptr;
  auto &chain = it->second;

  CacheEntry *best = nullptr;
  Optional<unsigned> bestCost;

  CacheEntry *next = chain.Root;
  while (next) {
    CacheEntry *cur = next;
    next = cur->getNext();

    // Ignore abstract entries if so requested.
    if (!allowAbstract && cur->getKind() != CacheEntry::Kind::Concrete)
      continue;

    // Ignore unacceptable entries.
    if (!IGF.isActiveDominancePointDominatedBy(cur->DefinitionPoint))
      continue;

    // If there's a collision, compare by cost, ignoring higher-cost entries.
    if (best) {
      // Compute the cost of the best entry if we haven't done so already.
      // If that's zero, go ahead and short-circuit out.
      if (!bestCost) {
        bestCost = best->cost();
        if (*bestCost == 0) break;
      }

      auto curCost = cur->cost();
      if (curCost >= *bestCost) continue;

      // Replace the best cost and fall through.
      bestCost = curCost;
    }
    best = cur;
  }

  // If we didn't find anything, we're done.
  if (!best) return nullptr;

  // Okay, we've found the best entry available.
  switch (best->getKind()) {

  // For concrete caches, this is easy.
  case CacheEntry::Kind::Concrete:
    return static_cast<ConcreteCacheEntry*>(best)->Value;

  // For abstract caches, we need to follow a path.
  case CacheEntry::Kind::Abstract: {
    auto entry = static_cast<AbstractCacheEntry*>(best);

    // Follow the path.
    auto &source = AbstractSources[entry->SourceIndex];
    auto result = entry->follow(IGF, source);

    // Following the path automatically caches at every point along it,
    // including the end.
    assert(chain.Root->DefinitionPoint == IGF.getActiveDominancePoint());
    assert(chain.Root->getKind() == CacheEntry::Kind::Concrete);

    return result;
  }

  }
  llvm_unreachable("bad cache entry kind");
}
Example #24
void LocalTypeDataCache::
addAbstractForFulfillments(IRGenFunction &IGF, FulfillmentMap &&fulfillments,
                           llvm::function_ref<AbstractSource()> createSource) {
  // Add the source lazily.
  Optional<unsigned> sourceIndex;
  auto getSourceIndex = [&]() -> unsigned {
    if (!sourceIndex) {
      AbstractSources.emplace_back(createSource());
      sourceIndex = AbstractSources.size() - 1;
    }
    return *sourceIndex;
  };

  for (auto &fulfillment : fulfillments) {
    CanType type = CanType(fulfillment.first.first);
    LocalTypeDataKind localDataKind;

    // For now, ignore witness-table fulfillments when they're not for
    // archetypes.
    if (ProtocolDecl *protocol = fulfillment.first.second) {
      if (auto archetype = dyn_cast<ArchetypeType>(type)) {
        auto conformsTo = archetype->getConformsTo();
        auto it = std::find(conformsTo.begin(), conformsTo.end(), protocol);
        if (it == conformsTo.end()) continue;
        localDataKind = LocalTypeDataKind::forAbstractProtocolWitnessTable(*it);
      } else {
        continue;
      }

    } else {
      // Ignore type metadata fulfillments for non-dependent types that
      // we can produce very cheaply.  We don't want to end up emitting
      // the type metadata for Int by chasing through N layers of metadata
      // just because that path happens to be in the cache.
      if (!type->hasArchetype() &&
          isTypeMetadataAccessTrivial(IGF.IGM, type)) {
        continue;
      }

      localDataKind = LocalTypeDataKind::forTypeMetadata();
    }

    // Find the chain for the key.
    auto key = getKey(type, localDataKind);
    auto &chain = Map[key];

    // Check whether there's already an entry that's at least as good as the
    // fulfillment.
    Optional<unsigned> fulfillmentCost;
    auto getFulfillmentCost = [&]() -> unsigned {
      if (!fulfillmentCost)
        fulfillmentCost = fulfillment.second.Path.cost();
      return *fulfillmentCost;
    };

    bool isConditional = IGF.isConditionalDominancePoint();

    bool foundBetter = false;
    for (CacheEntry *cur = chain.Root, *last = nullptr; cur;
         last = cur, cur = cur->getNext()) {
      // Ensure the entry is acceptable.
      if (!IGF.isActiveDominancePointDominatedBy(cur->DefinitionPoint))
        continue;

      // Ensure that the entry isn't better than the fulfillment.
      auto curCost = cur->cost();
      if (curCost == 0 || curCost <= getFulfillmentCost()) {
        foundBetter = true;
        break;
      }

      // If the entry is defined at the current point, (1) we know there
      // won't be a better entry and (2) we should remove it.
      if (cur->DefinitionPoint == IGF.getActiveDominancePoint() &&
          !isConditional) {
        // Splice it out of the chain.
        assert(!cur->isConditional());
        chain.eraseEntry(last, cur);
        break;
      }
    }
    if (foundBetter) continue;

    // Okay, make a new entry.

    // Register with the conditional dominance scope if necessary.
    if (isConditional) {
      IGF.registerConditionalLocalTypeDataKey(key);
    }

    // Allocate the new entry.
    auto newEntry = new AbstractCacheEntry(IGF.getActiveDominancePoint(),
                                           isConditional,
                                           getSourceIndex(),
                                           std::move(fulfillment.second.Path));

    // Add it to the front of the chain.
    chain.push_front(newEntry);
  }
}
Example #25
nsPreflightCache::CacheEntry*
nsPreflightCache::GetEntry(nsIURI* aURI,
                           nsIPrincipal* aPrincipal,
                           bool aWithCredentials,
                           bool aCreate)
{
    nsCString key;
    if (!GetCacheKey(aURI, aPrincipal, aWithCredentials, key)) {
        NS_WARNING("Invalid cache key!");
        return nullptr;
    }

    CacheEntry* entry;

    if (mTable.Get(key, &entry)) {
        // Entry already existed so just return it. Also update the LRU list.

        // Move to the head of the list.
        entry->removeFrom(mList);
        mList.insertFront(entry);

        return entry;
    }

    if (!aCreate) {
        return nullptr;
    }

    // This is a new entry, allocate and insert into the table now so that any
    // failures don't cause items to be removed from a full cache.
    entry = new CacheEntry(key);
    if (!entry) {
        NS_WARNING("Failed to allocate new cache entry!");
        return nullptr;
    }

    NS_ASSERTION(mTable.Count() <= PREFLIGHT_CACHE_SIZE,
                 "Something is borked, too many entries in the cache!");

    // Now enforce the max count.
    if (mTable.Count() == PREFLIGHT_CACHE_SIZE) {
        // Try to kick out all the expired entries.
        TimeStamp now = TimeStamp::NowLoRes();
        mTable.Enumerate(RemoveExpiredEntries, &now);

        // If that didn't remove anything then kick out the least recently used
        // entry.
        if (mTable.Count() == PREFLIGHT_CACHE_SIZE) {
            CacheEntry* lruEntry = static_cast<CacheEntry*>(mList.popLast());
            MOZ_ASSERT(lruEntry);

            // This will delete 'lruEntry'.
            mTable.Remove(lruEntry->mKey);

            NS_ASSERTION(mTable.Count() == PREFLIGHT_CACHE_SIZE - 1,
                         "Somehow tried to remove an entry that was never added!");
        }
    }

    mTable.Put(key, entry);
    mList.insertFront(entry);

    return entry;
}
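The admission policy above, drop expired entries first and evict the least recently used one only if the table is still full, sketched with standard containers. Capacity, TTL, and all names are illustrative, not Mozilla's.

#include <chrono>
#include <cstddef>
#include <list>
#include <string>
#include <unordered_map>
#include <utility>

struct Entry { std::chrono::steady_clock::time_point expires; };

class PreflightCacheSketch {
public:
    Entry& getOrCreate(const std::string& key) {
        auto it = m_table.find(key);
        if (it != m_table.end()) {
            // Hit: move to the front of the LRU list and return the entry.
            m_lru.splice(m_lru.begin(), m_lru, it->second.second);
            return it->second.first;
        }
        if (m_table.size() >= kCapacity)
            evict();
        m_lru.push_front(key);
        Entry fresh{std::chrono::steady_clock::now() + std::chrono::minutes(5)}; // illustrative TTL
        return m_table.emplace(key, std::make_pair(fresh, m_lru.begin())).first->second.first;
    }
private:
    void evict() {
        // Kick out all the expired entries first.
        auto now = std::chrono::steady_clock::now();
        for (auto it = m_table.begin(); it != m_table.end(); ) {
            if (it->second.first.expires <= now) {
                m_lru.erase(it->second.second);
                it = m_table.erase(it);
            } else {
                ++it;
            }
        }
        // If that removed nothing, evict the least recently used entry.
        if (m_table.size() >= kCapacity) {
            m_table.erase(m_lru.back());
            m_lru.pop_back();
        }
    }
    static constexpr std::size_t kCapacity = 100;
    std::list<std::string> m_lru;
    std::unordered_map<std::string, std::pair<Entry, std::list<std::string>::iterator>> m_table;
};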
Example #26
// called only by factories, treat as private
UObject* 
ICUService::getKey(ICUServiceKey& key, UnicodeString* actualReturn, const ICUServiceFactory* factory, UErrorCode& status) const 
{
    if (U_FAILURE(status)) {
        return NULL;
    }

    if (isDefault()) {
        return handleDefault(key, actualReturn, status);
    }

    ICUService* ncthis = (ICUService*)this; // cast away semantic const

    CacheEntry* result = NULL;
    {
        // The factory list can't be modified until we're done, 
        // otherwise we might update the cache with an invalid result.
        // The cache has to stay in synch with the factory list.
        // ICU doesn't have monitors so we can't use rw locks, so 
        // we single-thread everything using this service, for now.

        // if factory is not null, we're calling from within the mutex,
        // and since some unix machines don't have reentrant mutexes we
        // need to make sure not to try to lock it again.
        XMutex mutex(&lock, factory != NULL);

        if (serviceCache == NULL) {
            ncthis->serviceCache = new Hashtable(status);
            if (ncthis->serviceCache == NULL) {
                return NULL;
            }
            if (U_FAILURE(status)) {
                delete serviceCache;
                return NULL;
            }
            serviceCache->setValueDeleter(cacheDeleter);
        }

        UnicodeString currentDescriptor;
        UVectorDeleter cacheDescriptorList;
        UBool putInCache = FALSE;

        int32_t startIndex = 0;
        int32_t limit = factories->size();
        UBool cacheResult = TRUE;

        if (factory != NULL) {
            for (int32_t i = 0; i < limit; ++i) {
                if (factory == (const ICUServiceFactory*)factories->elementAt(i)) {
                    startIndex = i + 1;
                    break;
                }
            }
            if (startIndex == 0) {
                // throw new InternalError("Factory " + factory + "not registered with service: " + this);
                status = U_ILLEGAL_ARGUMENT_ERROR;
                return NULL;
            }
            cacheResult = FALSE;
        }

        do {
            currentDescriptor.remove();
            key.currentDescriptor(currentDescriptor);
            result = (CacheEntry*)serviceCache->get(currentDescriptor);
            if (result != NULL) {
                break;
            }

            // first test of cache failed, so we'll have to update
            // the cache if we eventually succeed-- that is, if we're 
            // going to update the cache at all.
            putInCache = TRUE;

            int32_t index = startIndex;
            while (index < limit) {
                ICUServiceFactory* f = (ICUServiceFactory*)factories->elementAt(index++);
                UObject* service = f->create(key, this, status);
                if (U_FAILURE(status)) {
                    delete service;
                    return NULL;
                }
                if (service != NULL) {
                    result = new CacheEntry(currentDescriptor, service);
                    if (result == NULL) {
                        delete service;
                        status = U_MEMORY_ALLOCATION_ERROR;
                        return NULL;
                    }

                    goto outerEnd;
                }
            }

            // prepare to load the cache with all additional ids that 
            // will resolve to result, assuming we'll succeed.  We
            // don't want to keep querying on an id that's going to
            // fallback to the one that succeeded, we want to hit the
            // cache the first time next goaround.
            if (cacheDescriptorList._obj == NULL) {
                cacheDescriptorList._obj = new UVector(uprv_deleteUObject, NULL, 5, status);
                if (U_FAILURE(status)) {
                    return NULL;
                }
            }
            UnicodeString* idToCache = new UnicodeString(currentDescriptor);
            if (idToCache == NULL || idToCache->isBogus()) {
                status = U_MEMORY_ALLOCATION_ERROR;
                return NULL;
            }

            cacheDescriptorList._obj->addElement(idToCache, status);
            if (U_FAILURE(status)) {
                return NULL;
            }
        } while (key.fallback());
outerEnd:

        if (result != NULL) {
            if (putInCache && cacheResult) {
                serviceCache->put(result->actualDescriptor, result, status);
                if (U_FAILURE(status)) {
                    delete result;
                    return NULL;
                }

                if (cacheDescriptorList._obj != NULL) {
                    for (int32_t i = cacheDescriptorList._obj->size(); --i >= 0;) {
                        UnicodeString* desc = (UnicodeString*)cacheDescriptorList._obj->elementAt(i);
                        serviceCache->put(*desc, result, status);
                        if (U_FAILURE(status)) {
                            delete result;
                            return NULL;
                        }

                        result->ref();
                        cacheDescriptorList._obj->removeElementAt(i);
                    }
                }
            }

            if (actualReturn != NULL) {
                // strip null prefix
                if (result->actualDescriptor.indexOf((UChar)0x2f) == 0) { // U+002f=slash (/)
                    actualReturn->remove();
                    actualReturn->append(result->actualDescriptor, 
                        1, 
                        result->actualDescriptor.length() - 1);
                } else {
                    *actualReturn = result->actualDescriptor;
                }

                if (actualReturn->isBogus()) {
                    status = U_MEMORY_ALLOCATION_ERROR;
                    delete result;
                    return NULL;
                }
            }

            UObject* service = cloneInstance(result->service);
            if (putInCache && !cacheResult) {
                delete result;
            }
            return service;
        }
    }

    return handleDefault(key, actualReturn, status);
}
Example #27
void LruReplPolicy::Touch(const CacheEntry& entry) {
  std::list<std::string>::iterator it = std::find(lru_list_.begin(), lru_list_.end(), entry.getKey());
  lru_list_.splice(lru_list_.end(), lru_list_, it);
}
Example #28
void LruReplPolicy::Insert(const CacheEntry& entry) {
  lru_list_.push_back(entry.getKey());
}
Example #29
Type Type_fromOid(Oid typeId, jobject typeMap)
{
	CacheEntry   ce;
	HeapTuple    typeTup;
	Form_pg_type typeStruct;
	Type         type = Type_fromOidCache(typeId);

	if(type != 0)
		return type;

	typeTup    = PgObject_getValidTuple(TYPEOID, typeId, "type");
	typeStruct = (Form_pg_type)GETSTRUCT(typeTup);

	if(typeStruct->typelem != 0 && typeStruct->typlen == -1)
	{
		type = Type_getArrayType(Type_fromOid(typeStruct->typelem, typeMap), typeId);
		goto finally;
	}

	/* For some reason, the anyarray is *not* an array with anyelement as the
	 * element type. We'd like to see it that way though.
	 */
	if(typeId == ANYARRAYOID)
	{
		type = Type_getArrayType(Type_fromOid(ANYELEMENTOID, typeMap), typeId);
		goto finally;
	}

	if(typeStruct->typbasetype != 0)
	{
		/* Domain type, recurse using the base type (which in turn may
		 * also be a domain)
		 */
		type = Type_fromOid(typeStruct->typbasetype, typeMap);
		goto finally;
	}

	if(typeMap != 0)
	{
		jobject joid      = Oid_create(typeId);
		jclass  typeClass = (jclass)JNI_callObjectMethod(typeMap, s_Map_get, joid);

		JNI_deleteLocalRef(joid);
		if(typeClass != 0)
		{
			TupleDesc tupleDesc = lookup_rowtype_tupdesc_noerror(typeId, -1, true);
			type = (Type)UDT_registerUDT(typeClass, typeId, typeStruct, tupleDesc, false);
			JNI_deleteLocalRef(typeClass);
			goto finally;
		}
	}

	/* Composite and record types will not have a TypeObtainer registered
	 */
	if(typeStruct->typtype == 'c' || (typeStruct->typtype == 'p' && typeId == RECORDOID))
	{
		type = Composite_obtain(typeId);
		goto finally;
	}

	ce = (CacheEntry)HashMap_getByOid(s_obtainerByOid, typeId);
	if(ce == 0)
		/*
		 * Default to String and standard textin/textout coersion.
		 */
		type = String_obtain(typeId);
	else
	{
		type = ce->type;
		if(type == 0)
			type = ce->obtainer(typeId);
	}

finally:
	ReleaseSysCache(typeTup);
	Type_cacheByOid(typeId, type);
	return type;
}
Example #30
MetadataResponse
LocalTypeDataCache::tryGet(IRGenFunction &IGF, LocalTypeDataKey key,
                           bool allowAbstract, DynamicMetadataRequest request) {
  // Use the caching key.
  key = key.getCachingKey();

  auto it = Map.find(key);
  if (it == Map.end()) return MetadataResponse();
  auto &chain = it->second;

  CacheEntry *best = nullptr;
  Optional<OperationCost> bestCost;

  CacheEntry *next = chain.Root;
  while (next) {
    CacheEntry *cur = next;
    next = cur->getNext();

    // Ignore abstract entries if so requested.
    if (!allowAbstract && !isa<ConcreteCacheEntry>(cur))
      continue;

    // Ignore unacceptable entries.
    if (!IGF.isActiveDominancePointDominatedBy(cur->DefinitionPoint))
      continue;

    // If there's a collision, compare by cost, ignoring higher-cost entries.
    if (best) {
      // Compute the cost of the best entry if we haven't done so already.
      // If that's zero, go ahead and short-circuit out.
      if (!bestCost) {
        bestCost = best->costForRequest(key, request);
        if (*bestCost == OperationCost::Free) break;
      }

      auto curCost = cur->costForRequest(key, request);
      if (curCost >= *bestCost) continue;

      // Replace the best cost and fall through.
      bestCost = curCost;
    }
    best = cur;
  }

  // If we didn't find anything, we're done.
  if (!best) return MetadataResponse();

  // Okay, we've found the best entry available.
  switch (best->getKind()) {

  // For concrete caches, this is easy.
  case CacheEntry::Kind::Concrete: {
    auto entry = cast<ConcreteCacheEntry>(best);

    if (entry->immediatelySatisfies(key, request))
      return entry->Value;

    assert(key.Kind.isAnyTypeMetadata());

    // Emit a dynamic check that the type metadata matches the request.
    // TODO: we could potentially end up calling this redundantly with a
    //   dynamic request.  Fortunately, those are used only in very narrow
    //   circumstances.
    auto response = emitCheckTypeMetadataState(IGF, request, entry->Value);

    // Add a concrete entry for the checked result.
    IGF.setScopedLocalTypeData(key, response);

    return response;
  }

  // For abstract caches, we need to follow a path.
  case CacheEntry::Kind::Abstract: {
    auto entry = cast<AbstractCacheEntry>(best);

    // Follow the path.
    auto &source = AbstractSources[entry->SourceIndex];
    auto response = entry->follow(IGF, source, request);

    // Following the path automatically caches at every point along it,
    // including the end.
    assert(chain.Root->DefinitionPoint == IGF.getActiveDominancePoint());
    assert(isa<ConcreteCacheEntry>(chain.Root));

    return response;
  }

  }
  llvm_unreachable("bad cache entry kind");
}