CFTypeRef CFMakeCollectable(CFTypeRef cf) {
    if (NULL == cf) return NULL;
    if (CF_IS_COLLECTABLE(cf)) {
#if defined(DEBUG)
        CFAllocatorRef allocator = CFGetAllocator(cf);
        if (!CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
            CFLog(kCFLogLevelWarning, CFSTR("object %p with non-GC allocator %p passed to CFMakeCollectable."), cf, allocator);
            HALT;
        }
#endif
        if (!CFTYPE_IS_OBJC(cf)) {
            CFRuntimeClass *cfClass = __CFRuntimeClassTable[__CFGenericTypeID_inline(cf)];
            if (cfClass->version & (_kCFRuntimeResourcefulObject)) {
                // don't allow the collector to manage uncollectable objects.
                CFLog(kCFLogLevelWarning, CFSTR("uncollectable object %p passed to CFMakeCollectable."), cf);
                HALT;
            }
        }
        if (auto_zone_retain_count(__CFCollectableZone, cf) == 0) {
            CFLog(kCFLogLevelWarning, CFSTR("object %p with 0 retain-count passed to CFMakeCollectable."), cf);
            return cf;
        }
        // balance the object's initial +1 retain so the collector manages its lifetime.
        auto_zone_release(__CFCollectableZone, (void *)cf);
    }
    return cf;
}
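/*
 * Usage sketch (illustrative, not part of the original source): the
 * create/make-collectable idiom under GC. CFStringCreateWithCString returns
 * a +1 reference; CFMakeCollectable balances that initial retain so the
 * collector manages the object's lifetime. Under non-GC it is a pass-through.
 * The function name below is hypothetical.
 */
static CFStringRef CopyCollectableLabel(void) {
    CFStringRef label = CFStringCreateWithCString(kCFAllocatorDefault, "label", kCFStringEncodingUTF8);
    return (CFStringRef)CFMakeCollectable(label);
}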
CFIndex _CFGetRetainCount(CFTypeRef cf) {
    if (NULL == cf) return 0;
    if (CF_IS_COLLECTABLE(cf)) {
        return auto_zone_retain_count(__CFCollectableZone, cf);
    }
    uint64_t rc = __CFGetFullRetainCount(cf);
    return (rc < (uint64_t)LONG_MAX) ? (CFIndex)rc : (CFIndex)LONG_MAX;
}
void _CFReleaseGC(CFTypeRef cf) {
#if defined(DEBUG)
    if (CF_USING_COLLECTABLE_MEMORY && !CF_IS_COLLECTABLE(cf)) {
        fprintf(stderr, "non-auto object %p passed to _CFReleaseGC.\n", cf);
        HALT;
    }
#endif
    if (!CF_USING_COLLECTABLE_MEMORY) CFRelease(cf);
}
CFTypeRef _CFRetainGC(CFTypeRef cf) {
#if defined(DEBUG)
    if (CF_USING_COLLECTABLE_MEMORY && !CF_IS_COLLECTABLE(cf)) {
        fprintf(stderr, "non-auto object %p passed to _CFRetainGC.\n", cf);
        HALT;
    }
#endif
    return CF_USING_COLLECTABLE_MEMORY ? cf : CFRetain(cf);
}
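/*
 * Sketch (illustrative; gCachedValue and the helper functions are
 * hypothetical): _CFRetainGC/_CFReleaseGC let dual-mode code express one
 * retain/release pair that performs real reference counting only when the
 * collector is off; under GC both calls are no-ops and lifetime is left to
 * the collector.
 */
static CFTypeRef gCachedValue = NULL;

static void CacheValue(CFTypeRef value) {
    gCachedValue = _CFRetainGC(value);              // non-GC: +1; GC: no-op
}

static void UncacheValue(void) {
    if (gCachedValue) _CFReleaseGC(gCachedValue);   // non-GC: -1; GC: no-op
    gCachedValue = NULL;
}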
CFIndex CFGetRetainCount(CFTypeRef cf) {
    if (NULL == cf) return 0;
    if (CF_IS_COLLECTABLE(cf)) {
        return auto_zone_retain_count(__CFCollectableZone, cf);
    }
    CFTYPE_OBJC_FUNCDISPATCH0(CFIndex, cf, "retainCount");
    __CFGenericAssertIsCF(cf);
    uint64_t rc = __CFGetFullRetainCount(cf);
    return (rc < (uint64_t)LONG_MAX) ? (CFIndex)rc : (CFIndex)LONG_MAX;
}
CFTypeRef CFRetain(CFTypeRef cf) {
    if (CF_IS_COLLECTABLE(cf)) {
        // always honor CFRetains with a GC-visible retain.
        auto_zone_retain(__CFCollectableZone, (void *)cf);
        return cf;
    }
    CFTYPE_OBJC_FUNCDISPATCH0(CFTypeRef, cf, "retain");
    if (cf) __CFGenericAssertIsCF(cf);
    return _CFRetain(cf);
}
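/*
 * Sketch (illustrative; ProcessNumber is hypothetical): CFRetain/CFRelease
 * pairs remain meaningful under GC. A CFRetain pins the object with a
 * GC-visible retain count so it survives collections until the matching
 * CFRelease, just as the pair protects the object under non-GC.
 */
static void ProcessNumber(CFNumberRef number) {
    CFRetain(number);    // GC: auto_zone_retain pins; non-GC: refcount +1
    // ... use number across a possible collection point ...
    CFRelease(number);   // GC: auto_zone_release unpins; non-GC: refcount -1
}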
void CFClass::finalizeType(CFTypeRef cf) throw()
{
    /* Why are we asserting the mutex here as well as in refCountForType?
       Because the way we control the objects and the queues is different
       under GC than it is under non-GC operations. In non-GC, we need to
       control the lifetime of the object, which means the cache lock has to
       be held while we are determining whether the object should live or
       die. The mutex is recursive, so we won't end up deadlocking on a lock
       we already hold. Under GC, the collector determines the lifetime of
       the object. We probably don't need to assert the mutex here, but it
       doesn't hurt. */
    SecCFObject *obj = SecCFObject::optional(cf);
    bool isCollectable = CF_IS_COLLECTABLE(cf);
    try
    {
        Mutex* mutex = obj->getMutexForObject();
        if (mutex == NULL)
        {
            // if the object didn't have a mutex, it wasn't cached.
            // Just clean it up and get out.
            obj->aboutToDestruct(); // removes the object from its associated cache.
        }
        else
        {
            StLock<Mutex> _(*mutex);
            if (obj->isNew())
            {
                // New objects aren't in the cache.
                // Just clean it up and get out.
                obj->aboutToDestruct(); // removes the object from its associated cache.
                return;
            }
            obj->aboutToDestruct(); // removes the object from its associated cache.
        }
    }
    catch (...)
    {
    }
    if (isCollectable)
    {
        delete obj;
    }
}
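/*
 * Sketch of the locking idea in the comment above (ObjectCache and its
 * methods are hypothetical, with std::recursive_mutex standing in for the
 * framework's Mutex): because the cache lock is recursive, a thread that
 * already holds it while deciding an object's fate can re-enter the lock
 * through the removal path without deadlocking against itself.
 */
#include <mutex>

struct ObjectCache
{
    std::recursive_mutex lock;

    void remove(const void *obj)
    {
        std::lock_guard<std::recursive_mutex> guard(lock); // re-acquire is safe
        // ... erase obj from the table ...
    }

    void destructIfDead(const void *obj, long refcount)
    {
        std::lock_guard<std::recursive_mutex> guard(lock); // decide under the lock
        if (refcount == 0)
            remove(obj); // re-enters lock: OK, it is recursive
    }
};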
void SecCFObject::operator delete(void *object) throw()
{
    CFTypeRef cfType = reinterpret_cast<CFTypeRef>(reinterpret_cast<const uint8_t *>(object) - kAlignedRuntimeSize);
    if (CF_IS_COLLECTABLE(cfType))
    {
        // under GC the collector owns the memory, so deleting is a no-op.
        return;
    }
    CFAllocatorRef allocator = CFGetAllocator(cfType);
    CFAllocatorDeallocate(allocator, (void *)cfType);
}
void CFRelease(CFTypeRef cf) {
#if !DEPLOYMENT_TARGET_WINDOWS
    if (CF_IS_COLLECTABLE(cf)) {
        // release the GC-visible reference.
        if (auto_zone_release(__CFCollectableZone, (void *)cf) == 0 && !CFTYPE_IS_OBJC(cf)) {
            // last reference: give a "resourceful" class the chance to release
            // external resources now; the collector reclaims the memory later.
            CFRuntimeClass *cfClass = __CFRuntimeClassTable[__CFGenericTypeID_inline(cf)];
            if (cfClass->version & _kCFRuntimeResourcefulObject) {
                if (cfClass->reclaim) cfClass->reclaim(cf);
            }
        }
        return;
    }
#endif
    CFTYPE_OBJC_FUNCDISPATCH0(void, cf, "release");
    if (cf) __CFGenericAssertIsCF(cf);
    _CFRelease(cf);
}
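/*
 * Sketch (illustrative; MyFileObject and MyFileReclaim are hypothetical, and
 * CFRuntimeClass registration details are elided): a class that sets
 * _kCFRuntimeResourcefulObject supplies a reclaim callback so that, under GC,
 * external resources are released as soon as the last CFRelease drops the GC
 * retain count to zero, even though the collector frees the memory later.
 */
#include <unistd.h>

typedef struct {
    CFRuntimeBase _base;   // every CF object starts with CFRuntimeBase
    int fd;                // external resource owned by the object
} MyFileObject;

static void MyFileReclaim(CFTypeRef cf) {
    MyFileObject *obj = (MyFileObject *)cf; // C cast drops const for teardown
    if (obj->fd >= 0) {
        close(obj->fd);    // release the descriptor now ...
        obj->fd = -1;      // ... the collector reclaims the memory later
    }
}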
uint32_t CFClass::cleanupObject(intptr_t op, CFTypeRef cf, bool &zap)
{
    // the default is to not throw away the object
    zap = false;

    bool isGC = CF_IS_COLLECTABLE(cf);

    uint32_t currentCount;
    SecCFObject *obj = SecCFObject::optional(cf);

    uint32_t oldCount;
    currentCount = obj->updateRetainCount(op, &oldCount);

    if (isGC)
    {
        auto_zone_t *zone = objc_collectableZone();

        if (op == -1 && oldCount == 0)
        {
            auto_zone_release(zone, (void *)cf);
        }
        else if (op == 1 && oldCount == 0 && currentCount == 1)
        {
            auto_zone_retain(zone, (void *)cf);
        }
        else if (op == -1 && oldCount == 1 && currentCount == 0)
        {
            /* To prevent accidental resurrection, just pull it out of the cache. */
            obj->aboutToDestruct();
            auto_zone_release(zone, (void *)cf);
        }
        else if (op == 0)
        {
            return currentCount;
        }

        return 0;
    }

    if (op == 0)
    {
        return currentCount;
    }
    else if (currentCount == 0)
    {
        // we may not be able to delete if the caller has active children
        if (obj->mayDelete())
        {
            finalizeType(cf);
            zap = true; // ask the caller to release the mutex and zap the object
            return 0;
        }
        else
        {
            return currentCount;
        }
    }
    else
    {
        return 0;
    }
}
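/*
 * Sketch of the GC transitions above, plus a hypothetical caller
 * (ReleaseCachedObject is not part of the original source):
 *   op == +1, oldCount == 0, currentCount == 1  -> auto_zone_retain (pin)
 *   op == -1, oldCount == 1, currentCount == 0  -> aboutToDestruct + auto_zone_release (unpin, no resurrection)
 *   op == -1, oldCount == 0                     -> auto_zone_release (drop an extra GC reference)
 *   op ==  0                                    -> report currentCount only
 */
static void ReleaseCachedObject(CFClass &cls, CFTypeRef cf)
{
    bool zap = false;
    // op == -1 requests a release; zap comes back true when the object was
    // finalized and the caller should drop its cache entry and mutex.
    uint32_t remaining = cls.cleanupObject(-1, cf, zap);
    if (zap)
    {
        // finalizeType() already ran; nothing left to release here.
    }
    (void)remaining;
}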