// Registers the CFAllocator class with the CF runtime and hand-initializes the
// four statically-allocated built-in allocators (system default, malloc,
// malloc-zone, null).  Static instances never go through
// _CFRuntimeCreateInstance, so their type ID and isa must be set explicitly.
// Must run before any allocator is used.
__private_extern__ void __CFAllocatorInitialize(void) {
    __kCFAllocatorTypeID = _CFRuntimeRegisterClass(&__CFAllocatorClass);

    // System default allocator.
    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorSystemDefault, __kCFAllocatorTypeID);
    __kCFAllocatorSystemDefault._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
    // Back it with the GC collectable zone when collection is enabled,
    // otherwise with the default malloc zone.
    __kCFAllocatorSystemDefault._context.info = (kCFUseCollectableAllocator ? objc_collectableZone() : malloc_default_zone());
#endif
    __kCFAllocatorSystemDefault._allocator = kCFAllocatorSystemDefault;

    // Plain-malloc allocator.
    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorMalloc, __kCFAllocatorTypeID);
    __kCFAllocatorMalloc._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorMalloc._allocator = kCFAllocatorSystemDefault;

#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
    // Malloc-zone allocator (only on platforms with malloc zones).
    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorMallocZone, __kCFAllocatorTypeID);
    __kCFAllocatorMallocZone._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorMallocZone._allocator = kCFAllocatorSystemDefault;
    __kCFAllocatorMallocZone._context.info = malloc_default_zone();
#endif

    // Null allocator (allocates nothing, frees nothing).
    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorNull, __kCFAllocatorTypeID);
    __kCFAllocatorNull._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorNull._allocator = kCFAllocatorSystemDefault;
}
void Init_PreGC(void) { auto_collection_control_t *control; #if MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 __auto_zone = objc_collectableZone(); #else __auto_zone = auto_zone(); #endif if (__auto_zone == NULL) { rb_objc_no_gc_error(); } __nsobject = (void *)objc_getClass("NSObject"); control = auto_collection_parameters(__auto_zone); if (getenv("GC_DEBUG")) { control->log = AUTO_LOG_COLLECTIONS | AUTO_LOG_REGIONS | AUTO_LOG_UNUSUAL; } if (getenv("GC_DISABLE")) { gc_disabled = true; auto_collector_disable(__auto_zone); } }
/* Creates and initializes the storage for a new CFArray instance.
 * allocator: where the instance lives (may be the GC collectable allocator).
 * flags: low two bits select the variety (__kCFArrayImmutable / __kCFArrayDeque).
 * capacity: element capacity; inline bucket space is reserved only for the
 *           immutable variety (deques allocate their store lazily).
 * callBacks: element callbacks; matched against the well-known sets so that
 *            the common cases need no per-array callback table.
 * Returns NULL if the runtime instance cannot be allocated. */
static CFArrayRef __CFArrayInit(CFAllocatorRef allocator, UInt32 flags, CFIndex capacity, const CFArrayCallBacks *callBacks) {
    struct __CFArray *memory;
    UInt32 size;
    // Clear everything above the variety bits before encoding callback kind.
    __CFBitfieldSetValue(flags, 31, 2, 0);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
        // Under GC, arrays with no retain/release callbacks hold their
        // elements weakly.
        if (!callBacks || (callBacks->retain == NULL && callBacks->release == NULL)) {
            __CFBitfieldSetValue(flags, 4, 4, 1); // setWeak
        }
    }
    // Encode which callback set this array uses into bits 3..2.
    if (__CFArrayCallBacksMatchNull(callBacks)) {
        __CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasNullCallBacks);
    } else if (__CFArrayCallBacksMatchCFType(callBacks)) {
        __CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasCFTypeCallBacks);
    } else {
        __CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasCustomCallBacks);
    }
    size = __CFArrayGetSizeOfType(flags) - sizeof(CFRuntimeBase);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFArrayImmutable:
        // Immutable arrays keep their buckets inline, right after the header.
        size += capacity * sizeof(struct __CFArrayBucket);
        break;
    case __kCFArrayDeque:
        break;
    }
    memory = (struct __CFArray*)_CFRuntimeCreateInstance(allocator, __kCFArrayTypeID, size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    __CFBitfieldSetValue(memory->_base._cfinfo[CF_INFO_BITS], 6, 0, flags);
    __CFArraySetCount((CFArrayRef)memory, 0);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFArrayImmutable:
        if (isWeakMemory(memory)) { // if weak, don't scan
            auto_zone_set_unscanned(objc_collectableZone(), memory);
        }
        if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFArray (immutable)");
        break;
    case __kCFArrayDeque:
        if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFArray (mutable-variable)");
        ((struct __CFArray *)memory)->_mutations = 1;
        ((struct __CFArray *)memory)->_mutInProgress = 0;
        ((struct __CFArray*)memory)->_store = NULL;
        break;
    }
    if (__kCFArrayHasCustomCallBacks == __CFBitfieldGetValue(flags, 3, 2)) {
        // Custom callbacks: copy the caller's table into the instance and
        // fault each function pointer in.
        CFArrayCallBacks *cb = (CFArrayCallBacks *)__CFArrayGetCallBacks((CFArrayRef)memory);
        *cb = *callBacks;
        FAULT_CALLBACK((void **)&(cb->retain));
        FAULT_CALLBACK((void **)&(cb->release));
        FAULT_CALLBACK((void **)&(cb->copyDescription));
        FAULT_CALLBACK((void **)&(cb->equal));
    }
    return (CFArrayRef)memory;
}
// Check __CFDataShouldAllocateCleared before passing true. static void *__CFDataAllocate(CFDataRef data, CFIndex size, Boolean clear) { void *bytes = NULL; if (__CFDataUseAllocator(data)) { CFAllocatorRef allocator = __CFGetAllocator(data); bytes = CFAllocatorAllocate(allocator, size, 0); if (clear) memset((uint8_t *)bytes, 0, size); } else { if (__CFDataAllocatesCollectable(data)) { bytes = auto_zone_allocate_object(objc_collectableZone(), size, AUTO_MEMORY_UNSCANNED, 0, clear); } else { if (clear) { bytes = calloc(1, size); } else { bytes = malloc(size); } } } return bytes; }
// Registers the CFAllocator class and initializes the static built-in
// allocator instances exactly once; dispatch_once makes this thread-safe
// (unlike the older unguarded initializer).
CF_PRIVATE void __CFAllocatorInitialize(void) {
    static dispatch_once_t initOnce = 0;
    dispatch_once(&initOnce, ^{
        __kCFAllocatorTypeID = _CFRuntimeRegisterClass(&__CFAllocatorClass); // initOnce covered

        // System default allocator.
        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorSystemDefault);
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
        // Use the GC collectable zone when collection is enabled, otherwise
        // the default malloc zone.
        __kCFAllocatorSystemDefault._context.info = (kCFUseCollectableAllocator ? objc_collectableZone() : malloc_default_zone());
#endif
        __kCFAllocatorSystemDefault._allocator = kCFAllocatorSystemDefault;

        // Plain-malloc allocator.
        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorMalloc);
        __kCFAllocatorMalloc._allocator = kCFAllocatorSystemDefault;

#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
        // Malloc-zone allocator (platforms with malloc zones only).
        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorMallocZone);
        __kCFAllocatorMallocZone._allocator = kCFAllocatorSystemDefault;
        __kCFAllocatorMallocZone._context.info = malloc_default_zone();
#endif

        // Null allocator.
        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorNull);
        __kCFAllocatorNull._allocator = kCFAllocatorSystemDefault;
    });
} // BUGFIX: the function's closing brace was missing in the original text.
// NULL bytesDeallocator to this function does not mean the default allocator, it means
// that there should be no deallocator, and the bytes should be copied.
//
// Common initializer behind the CFData creation functions.
// allocator: allocator for the instance itself (NULL means default).
// flags: mutability flags (__kCFMutable / __kCFGrowable encoded).
// capacity: maximum byte capacity for fixed-capacity datas.
// bytes/length: initial contents (copied unless bytesDeallocator is given).
// bytesDeallocator: non-NULL switches to no-copy mode; the data adopts
//   'bytes' and uses this allocator to free them later.
// Returns NULL if the instance or its store cannot be allocated.
static CFMutableDataRef __CFDataInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex length, CFAllocatorRef bytesDeallocator) {
    CFMutableDataRef memory;
    __CFGenericValidateMutabilityFlags(flags);
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert3(kCFFixedMutable != __CFMutableVarietyFromFlags(flags) || length <= capacity, __kCFLogAssertion, "%s(): for kCFFixedMutable type, capacity (%d) must be greater than or equal to number of initial elements (%d)", __PRETTY_FUNCTION__, capacity, length);
    CFAssert2(0 <= length, __kCFLogAssertion, "%s(): length (%d) cannot be less than zero", __PRETTY_FUNCTION__, length);
    Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
    Boolean noCopy = bytesDeallocator != NULL;  // no-copy mode: adopt caller's bytes
    Boolean isMutable = ((flags & __kCFMutable) != 0);
    Boolean isGrowable = ((flags & __kCFGrowable) != 0);
    // Small fixed-size buffers are stored inline, right after the header.
    Boolean allocateInline = !isGrowable && !noCopy && capacity < INLINE_BYTES_THRESHOLD;
    allocator = (allocator == NULL) ? __CFGetDefaultAllocator() : allocator;
    Boolean useAllocator = (allocator != kCFAllocatorSystemDefault && allocator != kCFAllocatorMalloc && allocator != kCFAllocatorMallocZone);
    CFIndex size = sizeof(struct __CFData) - sizeof(CFRuntimeBase);
    if (allocateInline) {
        size += sizeof(uint8_t) * __CFDataNumBytesForCapacity(capacity) + sizeof(uint8_t) * 15; // for 16-byte alignment fixup
    }
    memory = (CFMutableDataRef)_CFRuntimeCreateInstance(allocator, __kCFDataTypeID, size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    __CFDataSetNumBytesUsed(memory, 0);
    __CFDataSetLength(memory, 0);
    __CFDataSetInfoBits(memory, (allocateInline ? __kCFBytesInline : 0) | (useAllocator ? __kCFUseAllocator : 0) | (collectableMemory ? __kCFAllocatesCollectable : 0));
    // Under GC, decide whether the collector must scan us for pointers and
    // whether a finalizer is required, based on who owns the byte store.
    BOOL finalize = YES;
    BOOL scan = YES;
    if (collectableMemory) {
        if (allocateInline) {
            // We have no pointer to anything that needs to be reclaimed, so don't scan or finalize.
            scan = NO;
            finalize = NO;
        } else if (noCopy) {
            if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator)) {
                // We're taking responsibility for externally GC-allocated memory, so scan us, but we don't need to finalize.
                finalize = NO;
            } else if (bytesDeallocator == kCFAllocatorNull) {
                // We don't have responsibility for these bytes, so there's no need to be scanned and we don't need to finalize.
                scan = NO;
                finalize = NO;
            } else {
                // We have a pointer to non-GC-allocated memory, so don't scan, but do finalize.
                scan = NO;
            }
        }
        if (!scan) auto_zone_set_unscanned(objc_collectableZone(), memory);
        if (!finalize) auto_zone_set_nofinalize(objc_collectableZone(), memory);
    }
    if (isMutable && isGrowable) {
        __CFDataSetCapacity(memory, __CFDataRoundUpCapacity(1));
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(__CFDataRoundUpCapacity(1)));
        __CFSetMutableVariety(memory, kCFMutable);
    } else {
        /* Don't round up capacity */
        __CFDataSetCapacity(memory, capacity);
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(capacity));
        __CFSetMutableVariety(memory, kCFFixedMutable);
    }
    if (noCopy) {
        // No-copy path: point straight at the caller's bytes.
        __CFAssignWithWriteBarrier((void **)&memory->_bytes, (uint8_t *)bytes);
        if (finalize) {
            if (_CFAllocatorIsGCRefZero(bytesDeallocator)) {
                memory->_bytesDeallocator = bytesDeallocator;
            } else {
                memory->_bytesDeallocator = (CFAllocatorRef)CFRetain(_CFConvertAllocatorToNonGCRefZeroEquivalent(bytesDeallocator));
            }
        }
        if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator) && !_CFAllocatorIsGCRefZero(bytesDeallocator)) {
            // When given a GC allocator which is not one of the GCRefZero ones as the deallocator, we assume that the no-copy memory is GC-allocated with a retain count of (at least) 1 and we should release it now instead of waiting until __CFDataDeallocate.
            auto_zone_release(objc_collectableZone(), memory->_bytes);
        }
        __CFDataSetNumBytesUsed(memory, length);
        __CFDataSetLength(memory, length);
        // Mutable no-copy datas are not allowed, so don't bother setting needsToZero flag.
    } else {
        // Copy path: allocate (or use inline) store, then copy 'bytes' in.
        Boolean cleared = (isMutable && !isGrowable && !_CFExecutableLinkedOnOrAfter(CFSystemVersionSnowLeopard));
        if (!allocateInline) {
            // assume that allocators give 16-byte aligned memory back -- it is their responsibility
            __CFAssignWithWriteBarrier((void **)&memory->_bytes, __CFDataAllocate(memory, __CFDataNumBytes(memory) * sizeof(uint8_t), cleared));
            if (__CFOASafe) __CFSetLastAllocationEventName(memory->_bytes, "CFData (store)");
            if (NULL == memory->_bytes) {
                CFRelease(memory);
                return NULL;
            }
        } else {
            if (length == 0 && !isMutable) {
                // NSData sets its bytes pointer to NULL when its length is zero. Starting in 10.7 we do the same for CFData.
                memory->_bytes = NULL;
                // It is important to set this data as not inlined, so we do not recalculate a bytes pointer from null.
                __CFDataSetInline(memory, false);
            }
            cleared = true;
        }
        __CFDataSetNeedsToZero(memory, !cleared);
        memory->_bytesDeallocator = NULL;
        CFDataReplaceBytes(memory, CFRangeMake(0, 0), bytes, length);
    }
    __CFSetMutableVariety(memory, __CFMutableVarietyFromFlags(flags));
    return memory;
}
// Adjusts the retain count of a SecCFObject-backed CFTypeRef.
// op: +1 retain, -1 release, 0 query.
// Returns the current retain count for a query (op == 0), otherwise 0.
// Sets 'zap' when the caller should destroy the object itself (non-GC path
// only); under GC the collector reclaims the object instead.
uint32_t CFClass::cleanupObject(intptr_t op, CFTypeRef cf, bool &zap)
{
    // the default is to not throw away the object
    zap = false;

    bool isGC = CF_IS_COLLECTABLE(cf);

    uint32_t currentCount;
    SecCFObject *obj = SecCFObject::optional(cf);

    uint32_t oldCount;
    currentCount = obj->updateRetainCount(op, &oldCount);

    if (isGC) {
        auto_zone_t* zone = objc_collectableZone();

        // Mirror the CF retain count into the GC zone's external reference
        // count at the 0 <-> 1 transitions only.
        if (op == -1 && oldCount == 0) {
            auto_zone_release(zone, (void*) cf);
        } else if (op == 1 && oldCount == 0 && currentCount == 1) {
            auto_zone_retain(zone, (void*) cf);
        } else if (op == -1 && oldCount == 1 && currentCount == 0) {
            /* To prevent accidental resurrection, just pull it out of the cache. */
            obj->aboutToDestruct();
            auto_zone_release(zone, (void*) cf);
        } else if (op == 0) {
            return currentCount;
        }
        return 0;
    }

    if (op == 0) {
        return currentCount;
    } else if (currentCount == 0) {
        // we may not be able to delete if the caller has active children
        if (obj->mayDelete()) {
            finalizeType(cf);
            zap = true; // ask the caller to release the mutex and zap the object
            return 0;
        } else {
            return currentCount;
        }
    } else {
        return 0;
    }
}
// Builds an immutable copy of 'array', retaining each element through the new
// array's callbacks.  Handles toll-free-bridged ObjC arrays by falling back
// to the standard CFType callbacks.
__private_extern__ CFArrayRef __CFArrayCreateCopy0(CFAllocatorRef allocator, CFArrayRef array) {
    const CFArrayCallBacks *callbacks;
    if (CF_IS_OBJC(__kCFArrayTypeID, array)) {
        callbacks = &kCFTypeArrayCallBacks;
    } else {
        callbacks = __CFArrayGetCallBacks(array);
    }

    CFIndex count = CFArrayGetCount(array);
    CFArrayRef newArray = __CFArrayInit(allocator, __kCFArrayImmutable, count, callbacks);
    callbacks = __CFArrayGetCallBacks(newArray); // GC: use the new array's callbacks so we don't leak.

    struct __CFArrayBucket *slot = __CFArrayGetBucketsPtr(newArray);
    CFAllocatorRef slotAllocator = isStrongMemory(newArray) ? allocator : kCFAllocatorNull;
    void *slotBase = CF_IS_COLLECTABLE_ALLOCATOR(slotAllocator) ? (void *)auto_zone_base_pointer(objc_collectableZone(), slot) : NULL;

    // Retain (when applicable) and store each element with a write barrier.
    for (CFIndex i = 0; i < count; i++, slot++) {
        const void *value = CFArrayGetValueAtIndex(array, i);
        if (callbacks->retain != NULL) {
            value = (void *)INVOKE_CALLBACK2(callbacks->retain, allocator, value);
        }
        __CFAssignWithWriteBarrier((void **)&slot->_item, (void *)value);
    }
    __CFArraySetCount(newArray, count);
    return newArray;
}
// Creates an immutable CFArray from a C array of 'numValues' values,
// retaining each one through the array's effective callbacks.
__private_extern__ CFArrayRef __CFArrayCreate0(CFAllocatorRef allocator, const void **values, CFIndex numValues, const CFArrayCallBacks *callBacks) {
    CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);

    CFArrayRef newArray = __CFArrayInit(allocator, __kCFArrayImmutable, numValues, callBacks);
    const CFArrayCallBacks *cb = __CFArrayGetCallBacks(newArray);
    struct __CFArrayBucket *slot = __CFArrayGetBucketsPtr(newArray);
    CFAllocatorRef slotAllocator = isStrongMemory(newArray) ? allocator : kCFAllocatorNull;
    void *slotBase = CF_IS_COLLECTABLE_ALLOCATOR(slotAllocator) ? (void *)auto_zone_base_pointer(objc_collectableZone(), slot) : NULL;

    // Two loops: hoist the retain-callback check out of the per-element work.
    if (cb->retain != NULL) {
        for (CFIndex i = 0; i < numValues; i++, values++, slot++) {
            __CFAssignWithWriteBarrier((void **)&slot->_item, (void *)INVOKE_CALLBACK2(cb->retain, allocator, *values));
        }
    } else {
        for (CFIndex i = 0; i < numValues; i++, values++, slot++) {
            __CFAssignWithWriteBarrier((void **)&slot->_item, (void *)*values);
        }
    }
    __CFArraySetCount(newArray, numValues);
    return newArray;
}