/*
 * Release callback used by CF collections for their values.
 * Balances the retain taken in __CFTypeCollectionRetain() (referenced in the
 * inline comments below). For GC (collectable) allocators, most objects are
 * managed by the collector and need no explicit release; the exceptions are
 * "resourceful" CF objects, whose reclaim hook must fire when their
 * auto_zone refcount transitions to zero, and constant CF objects (rc bits
 * of 0), which must never be released.
 *
 * NOTE(review): on DEPLOYMENT_TARGET_WINDOWS the auto_zone_is_valid_pointer
 * branch body compiles away entirely, so control falls through to the final
 * CFRelease(cf) — presumably intentional since there is no GC on Windows,
 * but worth confirming.
 */
__private_extern__ void __CFTypeCollectionRelease(CFAllocatorRef allocator, const void *ptr) {
    CFTypeRef cf = (CFTypeRef)ptr;
    // only collections allocated in the GC zone can opt-out of reference counting.
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
        if (CFTYPE_IS_OBJC(cf)) return; // do nothing for OBJC objects.
        if (auto_zone_is_valid_pointer(__CFCollectableZone, cf)) {
#if !DEPLOYMENT_TARGET_WINDOWS
            // GC: If this a CF object in the GC heap that is marked uncollectable, then
            // must balance the retain done in __CFTypeCollectionRetain().
            // We're basically inlining CFRelease() here, to avoid an extra heap membership test.
            CFRuntimeClass *cfClass = __CFRuntimeClassTable[__CFGenericTypeID_inline(cf)];
            // NOTE(review): '&&' binds tighter than intended reading might suggest;
            // auto_zone_release() is only called when the resourceful bit is set
            // (short-circuit), so plain GC objects are NOT released here.
            if (cfClass->version & _kCFRuntimeResourcefulObject && auto_zone_release(__CFCollectableZone, (void*)cf) == 0) {
                // ResourceFull objects trigger 'reclaim' on transition to zero
                if (cfClass->reclaim) cfClass->reclaim(cf);
            } else
                // avoid releasing normal CF objects. Like other collections, for example
                ; // intentionally empty: the collector owns non-resourceful GC objects
            return;
#endif
        } else {
            // support constant CFTypeRef objects.
            // Constant objects carry a 0 in their retain-count bits; releasing
            // them would corrupt shared immutable singletons, so bail out.
#if __LP64__
            uint32_t lowBits = ((CFRuntimeBase *)cf)->_rc;
#else
            uint32_t lowBits = ((CFRuntimeBase *)cf)->_cfinfo[CF_RC_BITS];
#endif
            if (lowBits == 0) return;
        }
    }
    // Non-GC allocator (or a non-constant object outside the GC heap):
    // ordinary reference counting applies.
    CFRelease(cf);
}
/*
 * CFMakeCollectable: hands a GC-heap CF object over to the collector by
 * dropping one auto_zone retain. Returns its argument unchanged; NULL-safe.
 *
 * Guards (in order):
 *  - DEBUG builds HALT if the object was not allocated by a GC allocator.
 *  - Resourceful CF objects must stay under explicit reference counting; HALT.
 *  - An object whose auto_zone retain count is already 0 is left alone
 *    (releasing it again would over-release); a warning is logged.
 */
CFTypeRef CFMakeCollectable(CFTypeRef cf) {
    if (NULL == cf) return NULL;
    if (!CF_IS_COLLECTABLE(cf)) return cf; // not in the GC heap: nothing to do.
#if defined(DEBUG)
    CFAllocatorRef allocator = CFGetAllocator(cf);
    if (!CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
        CFLog(kCFLogLevelWarning, CFSTR("object %p with non-GC allocator %p passed to CFMakeCollectable."), cf, allocator);
        HALT;
    }
#endif
    if (!CFTYPE_IS_OBJC(cf)) {
        CFRuntimeClass *info = __CFRuntimeClassTable[__CFGenericTypeID_inline(cf)];
        if (info->version & (_kCFRuntimeResourcefulObject)) {
            // don't allow the collector to manage uncollectable objects.
            CFLog(kCFLogLevelWarning, CFSTR("uncollectable object %p passed to CFMakeCollectable."), cf);
            HALT;
        }
    }
    if (auto_zone_retain_count(__CFCollectableZone, cf) == 0) {
        CFLog(kCFLogLevelWarning, CFSTR("object %p with 0 retain-count passed to CFMakeCollectable."), cf);
        return cf;
    }
    auto_zone_release(__CFCollectableZone, (void *)cf);
    return cf;
}
/*
 * CFRelease: decrements the reference count of a CF object.
 *
 * GC path (non-Windows): when the object lives in the GC heap, the release
 * is expressed through auto_zone_release(). If that drops the GC-visible
 * count to zero AND the object is a pure-CF (non-ObjC) "resourceful" type,
 * its reclaim hook fires so external resources are torn down eagerly rather
 * than waiting for collection.
 *
 * Non-GC path: CFTYPE_OBJC_FUNCDISPATCH0 forwards "release" to ObjC objects
 * (NOTE(review): this macro appears to return early for ObjC objects —
 * statement order here is load-bearing); otherwise the object is asserted to
 * be a genuine CF object and _CFRelease() performs the actual decrement.
 *
 * NOTE(review): the GC path evaluates CF_IS_COLLECTABLE(cf) before any NULL
 * check — presumably the macro tolerates NULL; confirm before reordering.
 */
void CFRelease(CFTypeRef cf) {
#if !DEPLOYMENT_TARGET_WINDOWS
    if (CF_IS_COLLECTABLE(cf)) {
        // release the GC-visible reference.
        if (auto_zone_release(__CFCollectableZone, (void*)cf) == 0 && !CFTYPE_IS_OBJC(cf)) {
            CFRuntimeClass *cfClass = __CFRuntimeClassTable[__CFGenericTypeID_inline(cf)];
            if (cfClass->version & _kCFRuntimeResourcefulObject) {
                // transition to zero: give the object a chance to drop its resources.
                if (cfClass->reclaim) cfClass->reclaim(cf);
            }
        }
        return;
    }
#endif
    CFTYPE_OBJC_FUNCDISPATCH0(void, cf, "release");
    if (cf) __CFGenericAssertIsCF(cf);
    _CFRelease(cf);
}
// NULL bytesDeallocator to this function does not mean the default allocator, it means
// that there should be no deallocator, and the bytes should be copied.
//
// Common creation path for all CFData/CFMutableData variants.
//
//   flags            - mutability variety (__kCFMutable / __kCFGrowable bits).
//   capacity         - requested capacity; ignored (not rounded) for fixed types.
//   bytes/length     - initial contents.
//   bytesDeallocator - non-NULL selects "no-copy" mode: the object adopts
//                      'bytes' and this allocator frees them later
//                      (kCFAllocatorNull = caller keeps ownership).
//
// Returns a new object, or NULL if instance allocation (or the byte-store
// allocation) fails. Small fixed buffers (< INLINE_BYTES_THRESHOLD) are
// allocated inline after the instance to avoid a second allocation.
static CFMutableDataRef __CFDataInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex length, CFAllocatorRef bytesDeallocator) {
    CFMutableDataRef memory;
    __CFGenericValidateMutabilityFlags(flags);
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert3(kCFFixedMutable != __CFMutableVarietyFromFlags(flags) || length <= capacity, __kCFLogAssertion, "%s(): for kCFFixedMutable type, capacity (%d) must be greater than or equal to number of initial elements (%d)", __PRETTY_FUNCTION__, capacity, length);
    CFAssert2(0 <= length, __kCFLogAssertion, "%s(): length (%d) cannot be less than zero", __PRETTY_FUNCTION__, length);
    Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
    Boolean noCopy = bytesDeallocator != NULL;   // no-copy mode iff a deallocator was supplied
    Boolean isMutable = ((flags & __kCFMutable) != 0);
    Boolean isGrowable = ((flags & __kCFGrowable) != 0);
    // Inline storage only for fixed-size, copied, small buffers.
    Boolean allocateInline = !isGrowable && !noCopy && capacity < INLINE_BYTES_THRESHOLD;
    allocator = (allocator == NULL) ? __CFGetDefaultAllocator() : allocator;
    // Custom (non-system) allocators must also be used for the byte store.
    Boolean useAllocator = (allocator != kCFAllocatorSystemDefault && allocator != kCFAllocatorMalloc && allocator != kCFAllocatorMallocZone);
    CFIndex size = sizeof(struct __CFData) - sizeof(CFRuntimeBase);
    if (allocateInline) {
        size += sizeof(uint8_t) * __CFDataNumBytesForCapacity(capacity) + sizeof(uint8_t) * 15; // for 16-byte alignment fixup
    }
    memory = (CFMutableDataRef)_CFRuntimeCreateInstance(allocator, __kCFDataTypeID, size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    __CFDataSetNumBytesUsed(memory, 0);
    __CFDataSetLength(memory, 0);
    __CFDataSetInfoBits(memory,
                        (allocateInline ? __kCFBytesInline : 0) |
                        (useAllocator ? __kCFUseAllocator : 0) |
                        (collectableMemory ? __kCFAllocatesCollectable : 0));
    // Decide whether the collector should scan this object for pointers and
    // whether it must run a finalizer on it. Defaults: scan + finalize.
    BOOL finalize = YES;
    BOOL scan = YES;
    if (collectableMemory) {
        if (allocateInline) {
            // We have no pointer to anything that needs to be reclaimed, so don't scan or finalize.
            scan = NO;
            finalize = NO;
        } else if (noCopy) {
            if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator)) {
                // We're taking responsibility for externally GC-allocated memory, so scan us, but we don't need to finalize.
                finalize = NO;
            } else if (bytesDeallocator == kCFAllocatorNull) {
                // We don't have responsibility for these bytes, so there's no need to be scanned and we don't need to finalize.
                scan = NO;
                finalize = NO;
            } else {
                // We have a pointer to non-GC-allocated memory, so don't scan, but do finalize.
                scan = NO;
            }
        }
        if (!scan) auto_zone_set_unscanned(objc_collectableZone(), memory);
        if (!finalize) auto_zone_set_nofinalize(objc_collectableZone(), memory);
    }
    if (isMutable && isGrowable) {
        __CFDataSetCapacity(memory, __CFDataRoundUpCapacity(1));
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(__CFDataRoundUpCapacity(1)));
        __CFSetMutableVariety(memory, kCFMutable);
    } else {
        /* Don't round up capacity */
        __CFDataSetCapacity(memory, capacity);
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(capacity));
        __CFSetMutableVariety(memory, kCFFixedMutable);
    }
    if (noCopy) {
        // Adopt the caller's buffer directly (write barrier keeps GC happy).
        __CFAssignWithWriteBarrier((void **)&memory->_bytes, (uint8_t *)bytes);
        if (finalize) {
            if (_CFAllocatorIsGCRefZero(bytesDeallocator)) {
                memory->_bytesDeallocator = bytesDeallocator;
            } else {
                memory->_bytesDeallocator = (CFAllocatorRef)CFRetain(_CFConvertAllocatorToNonGCRefZeroEquivalent(bytesDeallocator));
            }
        }
        if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator) && !_CFAllocatorIsGCRefZero(bytesDeallocator)) {
            // When given a GC allocator which is not one of the GCRefZero ones as the deallocator, we assume that the no-copy memory is GC-allocated with a retain count of (at least) 1 and we should release it now instead of waiting until __CFDataDeallocate.
            auto_zone_release(objc_collectableZone(), memory->_bytes);
        }
        __CFDataSetNumBytesUsed(memory, length);
        __CFDataSetLength(memory, length);
        // Mutable no-copy datas are not allowed, so don't bother setting needsToZero flag.
    } else {
        // Copy mode: allocate (or use the inline tail as) the byte store,
        // then copy the initial contents in via CFDataReplaceBytes.
        Boolean cleared = (isMutable && !isGrowable && !_CFExecutableLinkedOnOrAfter(CFSystemVersionSnowLeopard));
        if (!allocateInline) {
            // assume that allocators give 16-byte aligned memory back -- it is their responsibility
            __CFAssignWithWriteBarrier((void **)&memory->_bytes, __CFDataAllocate(memory, __CFDataNumBytes(memory) * sizeof(uint8_t), cleared));
            if (__CFOASafe) __CFSetLastAllocationEventName(memory->_bytes, "CFData (store)");
            if (NULL == memory->_bytes) {
                CFRelease(memory);
                return NULL;
            }
        } else {
            if (length == 0 && !isMutable) {
                // NSData sets its bytes pointer to NULL when its length is zero. Starting in 10.7 we do the same for CFData.
                memory->_bytes = NULL;
                // It is important to set this data as not inlined, so we do not recalculate a bytes pointer from null.
                __CFDataSetInline(memory, false);
            }
            cleared = true;
        }
        __CFDataSetNeedsToZero(memory, !cleared);
        memory->_bytesDeallocator = NULL;
        CFDataReplaceBytes(memory, CFRangeMake(0, 0), bytes, length);
    }
    // Now that initialization is complete, switch to the requested variety
    // (the fixed-mutable setting above allowed the initial byte copy).
    __CFSetMutableVariety(memory, __CFMutableVarietyFromFlags(flags));
    return memory;
}
uint32_t CFClass::cleanupObject(intptr_t op, CFTypeRef cf, bool &zap) { // the default is to not throw away the object zap = false; bool isGC = CF_IS_COLLECTABLE(cf); uint32_t currentCount; SecCFObject *obj = SecCFObject::optional(cf); uint32_t oldCount; currentCount = obj->updateRetainCount(op, &oldCount); if (isGC) { auto_zone_t* zone = objc_collectableZone(); if (op == -1 && oldCount == 0) { auto_zone_release(zone, (void*) cf); } else if (op == 1 && oldCount == 0 && currentCount == 1) { auto_zone_retain(zone, (void*) cf); } else if (op == -1 && oldCount == 1 && currentCount == 0) { /* To prevent accidental resurrection, just pull it out of the cache. */ obj->aboutToDestruct(); auto_zone_release(zone, (void*) cf); } else if (op == 0) { return currentCount; } return 0; } if (op == 0) { return currentCount; } else if (currentCount == 0) { // we may not be able to delete if the caller has active children if (obj->mayDelete()) { finalizeType(cf); zap = true; // ask the caller to release the mutex and zap the object return 0; } else { return currentCount; } } else { return 0; } }