PooledString::Ref &PooledString::Ref::operator=(const Ref &ref)
{
    if (ref.realString != realString)
    {
        removeRef();
        setRef(ref);
    }
    return *this;
}

void MatchScoringMapPreparer::prepMap(OsmMapPtr map, const bool removeNodes)
{
  // if an element has a uuid, but no REF1/REF2 tag, then create a REF tag with the uuid. The
  // 1/2 is determined by the unknown status.
  ConvertUuidToRefVisitor convertUuidToRef;
  map->visitRw(convertUuidToRef);

  // #5891 if the feature is marked as todo then there is no need to conflate & evaluate it.
  shared_ptr<TagCriterion> isTodo(new TagCriterion("REF2", "todo"));
  RemoveElementsVisitor remover(isTodo);
  remover.setRecursive(true);
  map->visitRw(remover);

  // add a uuid to all elements with a REF tag.
  HasTagCriterion criterion("REF1", "REF2", "REVIEW");
  AddUuidVisitor uuid("uuid");
  FilteredVisitor v(criterion, uuid);
  map->visitRw(v);

  if (removeNodes)
  {
    // remove all REF1/REF2 tags from the nodes.
    RemoveTagVisitor removeRef("REF1", "REF2");
    IsNodeFilter nodeFilter(Filter::KeepMatches);
    FilteredVisitor removeRefV(nodeFilter, removeRef);
    map->visitRw(removeRefV);
  }

  //MapCleaner().apply(map);
}

void removeWeakRef(const void* id)
{
    if (!mRetain) {
        removeRef(&mWeakRefs, id);
    } else {
        addRef(&mWeakRefs, id, -mWeak);
    }
}

void removeWeakRef(const void* id)
{
    if (!mRetain) {
        removeRef(&mWeakRefs, id);
    } else {
        addRef(&mWeakRefs, id, -mWeak.load(std::memory_order_relaxed));
    }
}

void removeStrongRef(const void* id) { //ALOGD_IF(mTrackEnabled, // "removeStrongRef: RefBase=%p, id=%p", mBase, id); if (!mRetain) { removeRef(&mStrongRefs, id); } else { addRef(&mStrongRefs, id, -mStrong); } }
void removeStrongRef(const void* id) { //ALOGD_IF(mTrackEnabled, // "removeStrongRef: RefBase=%p, id=%p", mBase, id); if (!mRetain) { removeRef(&mStrongRefs, id); } else { addRef(&mStrongRefs, id, -mStrong.load(std::memory_order_relaxed)); } }
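// A minimal sketch of the debug ref-tracking that the addRef/removeRef calls in
// the four functions above imply: a list of (id, count-snapshot) records kept per
// ref-count direction. RefEntry and these free-function signatures are
// illustrative assumptions, not the library's actual types.
#include <vector>
#include <cstdint>

struct RefEntry
{
    const void* id;   // caller-supplied identity of the reference holder
    int32_t     ref;  // ref-count snapshot taken when the reference was recorded
};

static void addRef(std::vector<RefEntry>* refs, const void* id, int32_t count)
{
    // record who took (or, with a negated count, released) a reference
    refs->push_back(RefEntry{id, count});
}

static void removeRef(std::vector<RefEntry>* refs, const void* id)
{
    // drop the first record matching this id; failing to find one would
    // indicate an unbalanced incStrong/decStrong pair
    for (auto it = refs->begin(); it != refs->end(); ++it)
    {
        if (it->id == id)
        {
            refs->erase(it);
            return;
        }
    }
}

// Note the retain-mode branch in the callers: when mRetain is set they append a
// record with a negated count instead of erasing, preserving the full reference
// history for later inspection.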
OSCL_EXPORT_REF void OsclMemPoolFixedChunkAllocator::deallocate(OsclAny* p)
{
    if (iMemPool == NULL)
    {
        // Memory pool hasn't been allocated yet so error
        OSCL_LEAVE(OsclErrNotReady);
    }

    uint8* ptmp = (uint8*)p;
    uint8* mptmp = (uint8*)iMemPoolAligned;

    if ((ptmp < mptmp) || ptmp >= (mptmp + iNumChunk * iChunkSizeMemAligned))
    {
        // Returned memory is not part of this memory pool
        OSCL_LEAVE(OsclErrArgument);
    }

    if (((ptmp - mptmp) % iChunkSizeMemAligned) != 0)
    {
        // Returned memory is not aligned to a chunk boundary
        OSCL_LEAVE(OsclErrArgument);
    }

#if(!OSCL_BYPASS_MEMMGT)
    // Check whether the same chunk is being deallocated more than once
    uint32 ii;
    for (ii = 0; ii < iFreeMemChunkList.size(); ii++)
    {
        if (iFreeMemChunkList[ii] == p)
        {
            OSCL_LEAVE(OsclErrArgument);
        }
    }
#endif

    // Put the returned chunk in the free pool
    iFreeMemChunkList.push_back(p);

    // Notify the observer that a free chunk is available, if it is waiting for such a callback
    if (iCheckNextAvailableFreeChunk)
    {
        iCheckNextAvailableFreeChunk = false;
        if (iObserver)
        {
            iObserver->freechunkavailable(iNextAvailableContextData);
        }
    }

    // Decrement the refcount since deallocation succeeded
    removeRef();
}

CoreAttributesList::~CoreAttributesList()
{
    if (autoDelete())
    {
        /* We need to make sure that the CoreAttributes are first removed from
         * the list and then deleted. */
        setAutoDelete(false);
        while (!isEmpty())
        {
            CoreAttributes* tp = getFirst();
            removeRef(tp);
            delete tp;
        }
        setAutoDelete(true);
    }
}

void eServicePlaylistHandler::enterDirectory(const eServiceReference &dir, Signal1<void,const eServiceReference&> &callback)
{
    if (dir.type == id) // for playlists in other playlists..
    {
        ePlaylist *service = (ePlaylist*)addRef(dir);
        if (!service)
            return;
        for (std::list<ePlaylistEntry>::const_iterator i(service->getConstList().begin());
             i != service->getConstList().end(); ++i)
            callback(*i);
        removeRef(dir);
        return;
    }

    // for playlists in any other root.. but not in another playlist..
    std::pair<std::multimap<eServiceReference,eServiceReference>::const_iterator,
              std::multimap<eServiceReference,eServiceReference>::const_iterator>
        range = playlists.equal_range(dir);
    while (range.first != range.second)
    {
        callback(range.first->second);
        ++range.first;
    }
}

void _DelayedReleaser::update(float dt)
{
    std::vector<cocos2d::Ref *> releaseFrames;

    // Only frames nobody else holds (refcount back down to 1) are candidates;
    // each candidate must stay unreferenced for its full countdown before release.
    auto itFrames = _frames.begin();
    while (itFrames != _frames.end())
    {
        if (itFrames->first->getReferenceCount() == 1)
        {
            if (itFrames->second == 0)
            {
                releaseFrames.push_back(itFrames->first);
            }
            else
            {
                itFrames->second--;
            }
        }
        ++itFrames;
    }

    for (auto &it : releaseFrames)
    {
        removeRef(it);
        _frames.erase(it);
        it->release();
    }
    releaseFrames.clear();

    if (_frames.empty())
    {
        unregisterWithDispatcher();
    }
}

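// The update() loop above implies a registration step that retains the frame and
// arms its countdown. A minimal sketch of that counterpart, assuming addFrame(),
// the delayTicks parameter, and registerWithDispatcher() as mirrors of the calls
// seen in update(); none of these names are taken from the source.
void _DelayedReleaser::addFrame(cocos2d::Ref* frame, int delayTicks)
{
    frame->retain();             // keep the frame alive while it is tracked
    addRef(frame);               // mirror of the removeRef() call in update()
    _frames[frame] = delayTicks; // release only after this many ticks at refcount 1
    registerWithDispatcher();    // start receiving update(dt) callbacks
}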
PooledString::Ref::~Ref(void)
{
    removeRef();
}

void PooledString::Ref::clear(void)
{
    removeRef();
}

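// A plausible shape for the removeRef() shared by Ref's destructor, operator=(),
// and clear() above: drop this Ref's claim on the shared string and recycle the
// pool entry at zero. Everything here (PoolEntry, useCount, releaseToPool) is an
// illustrative assumption, not PooledString's actual layout.
struct PoolEntry
{
    int useCount = 0; // how many Refs currently share this pooled string
};

// Hand the entry back to the pool's free list; stubbed here.
static void releaseToPool(PoolEntry* /*entry*/) {}

static void removeRefSketch(PoolEntry*& realString)
{
    if (realString != nullptr)
    {
        if (--realString->useCount == 0)
            releaseToPool(realString); // last reference: recycle the entry
        realString = nullptr;          // this Ref no longer points at anything
    }
}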
OSCL_EXPORT_REF void OsclMemPoolResizableAllocator::deallocate(OsclAny* aPtr)
{
    // Check that the returned pointer is from the memory pool
    if (validateblock(aPtr) == false)
    {
        OSCL_LEAVE(OsclErrArgument);
    }

    // Retrieve the block info header and validate the info
    uint8* byteptr = (uint8*)aPtr;
    MemPoolBlockInfo* retblock = (MemPoolBlockInfo*)(byteptr - iBlockInfoAlignedSize);
    OSCL_ASSERT(retblock != NULL);
    OSCL_ASSERT(retblock->iBlockPreFence == OSCLMEMPOOLRESIZABLEALLOCATOR_PREFENCE_PATTERN);
    OSCL_ASSERT(retblock->iBlockPostFence == OSCLMEMPOOLRESIZABLEALLOCATOR_POSTFENCE_PATTERN);

    // Return the block to the memory pool buffer
    deallocateblock(*retblock);
    --(retblock->iParentBuffer->iNumOutstanding);

    // Check if the user needs to be notified when a block becomes available
    if (iCheckNextAvailable)
    {
        // Check if the user is waiting for a certain size
        if (iRequestedNextAvailableSize == 0)
        {
            // No size requested, so just make the callback
            iCheckNextAvailable = false;
            if (iObserver)
            {
                iObserver->freeblockavailable(iNextAvailableContextData);
            }
        }
        else
        {
            // Check if the requested size is available now
            if (findfreeblock(iRequestedNextAvailableSize + iBlockInfoAlignedSize) != NULL)
            {
                iCheckNextAvailable = false;
                if (iObserver)
                {
                    iObserver->freeblockavailable(iNextAvailableContextData);
                }
            }
            else if (iRequestedNextAvailableSize > iMemPoolBufferSize)
            {
                // The requested size is bigger than the set buffer size.
                // Check if there is space to grow the buffer.
                if (iMemPoolBufferNumLimit == 0 || iMemPoolBufferList.size() < iMemPoolBufferNumLimit)
                {
                    // Available
                    iCheckNextAvailable = false;
                    if (iObserver)
                    {
                        iObserver->freeblockavailable(iNextAvailableContextData);
                    }
                }
                else
                {
                    // Not available, so see if there is a buffer with no
                    // outstanding blocks which can be destroyed in the next
                    // allocate() call.
                    bool emptybufferfound = false;
                    for (uint32 j = 0; j < iMemPoolBufferList.size(); ++j)
                    {
                        if (iMemPoolBufferList[j]->iNumOutstanding == 0)
                        {
                            emptybufferfound = true;
                            break;
                        }
                    }

                    if (emptybufferfound)
                    {
                        iCheckNextAvailable = false;
                        if (iObserver)
                        {
                            iObserver->freeblockavailable(iNextAvailableContextData);
                        }
                    }
                }
            }
        }
    }

    if (iCheckFreeMemoryAvailable)
    {
        if (iRequestedAvailableFreeMemSize == 0)
        {
            // No size requested, so just make the callback
            iCheckFreeMemoryAvailable = false;
            if (iFreeMemPoolObserver)
            {
                iFreeMemPoolObserver->freememoryavailable(iFreeMemContextData);
            }
        }
        else
        {
            // Check if the requested size is available now
            if (getAvailableSize() >= iRequestedAvailableFreeMemSize)
            {
                iCheckFreeMemoryAvailable = false;
                if (iFreeMemPoolObserver)
                {
                    iFreeMemPoolObserver->freememoryavailable(iFreeMemContextData);
                }
            }
        }
    }

    // Decrement the refcount since deallocation succeeded
    removeRef();
}

// if the ref count drops to 0, free the owned memory and destroy the object
~SmartPointer()
{
    removeRef();
}
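// A self-contained sketch of the SmartPointer class the destructor above belongs
// to, assuming a classic heap-allocated count shared between copies; removeRef()
// and the member names (ptr, refCount) are filled in for illustration, not taken
// from the source.
template <typename T>
class SmartPointer
{
public:
    explicit SmartPointer(T* p) : ptr(p), refCount(new int(1)) {}

    SmartPointer(const SmartPointer<T>& sptr) : ptr(sptr.ptr), refCount(sptr.refCount)
    {
        ++*refCount; // another owner of the same object
    }

    // if the ref count drops to 0, free the owned memory and destroy the object
    ~SmartPointer()
    {
        removeRef();
    }

    T& operator*() const { return *ptr; }
    T* operator->() const { return ptr; }

private:
    void removeRef()
    {
        if (--*refCount == 0) // last owner going away
        {
            delete ptr;
            delete refCount;
        }
    }

    T*   ptr;      // the managed object
    int* refCount; // count shared by all copies pointing at ptr
};

// A copy-assignment operator following the same pattern (removeRef() on the old
// target, then adopt and increment the new count) is omitted for brevity.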