void CFAllocatorDeallocate(CFAllocatorRef allocator, void *ptr) {
    CFAllocatorDeallocateCallBack deallocateFunc;
    if (kCFAllocatorSystemDefaultGCRefZero == allocator) {
        if (_CFAllocatorIsGCRefZero(allocator)) return;
        allocator = kCFAllocatorSystemDefault;
    } else if (kCFAllocatorDefaultGCRefZero == allocator) {
        // Under GC, we can't use just any old allocator when the GCRefZero allocator was requested
        allocator = kCFUseCollectableAllocator ? kCFAllocatorSystemDefault : __CFGetDefaultAllocator();
        if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) return;
    } else if (NULL == allocator) {
        allocator = __CFGetDefaultAllocator();
    }
#if defined(DEBUG) && (DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI)
    if (allocator->_base._cfisa == __CFISAForTypeID(__kCFAllocatorTypeID)) {
        __CFGenericValidateType(allocator, __kCFAllocatorTypeID);
    }
#else
    __CFGenericValidateType(allocator, __kCFAllocatorTypeID);
#endif
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
    if (allocator->_base._cfisa != __CFISAForTypeID(__kCFAllocatorTypeID)) {  // malloc_zone_t *
#if defined(DEBUG)
        size_t size = malloc_size(ptr);
        if (size) memset(ptr, 0xCC, size);
#endif
        return malloc_zone_free((malloc_zone_t *)allocator, ptr);
    }
#endif
    deallocateFunc = __CFAllocatorGetDeallocateFunction(&allocator->_context);
    if (NULL != ptr && NULL != deallocateFunc) {
        INVOKE_CALLBACK2(deallocateFunc, ptr, allocator->_context.info);
    }
}
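// Illustrative sketch (not part of the original file; the helper name is
// hypothetical): the allocate/deallocate pairing the function above serves.
// A NULL allocator falls through to the default allocator in both calls,
// mirroring the NULL check at the top of CFAllocatorDeallocate. Uses only
// the public CFAllocator API.
static void __CFAllocatorDeallocateUsageSketch(void) {
    void *buf = CFAllocatorAllocate(NULL, 64, 0);  // NULL selects the default allocator
    if (buf) CFAllocatorDeallocate(NULL, buf);     // must pass the same allocator back
}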
CFMutableDataRef CFDataCreateMutableCopy(CFAllocatorRef allocator, CFIndex capacity, CFDataRef data) {
    // Do not allow magic allocator for now for mutable datas, because it
    // isn't remembered for proper handling later when growth of the buffer
    // has to occur.
    Boolean wasMagic = _CFAllocatorIsGCRefZero(allocator);
    if (0 == capacity) allocator = _CFConvertAllocatorToNonGCRefZeroEquivalent(allocator);
    CFMutableDataRef r = (CFMutableDataRef)__CFDataInit(allocator, (0 == capacity) ? kCFMutable : kCFFixedMutable, capacity, CFDataGetBytePtr(data), CFDataGetLength(data), NULL);
    if (wasMagic) CFMakeCollectable(r);
    return r;
}
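// Illustrative sketch (hypothetical helper, not in the original source): the
// capacity argument picks the variety passed to __CFDataInit -- 0 yields a
// growable kCFMutable data, a nonzero capacity yields kCFFixedMutable.
static void __CFDataMutableCopySketch(CFDataRef source) {
    // Growable: may exceed the source length later via CFDataAppendBytes().
    CFMutableDataRef growable = CFDataCreateMutableCopy(kCFAllocatorDefault, 0, source);
    // Fixed: capacity is a hard ceiling and must be >= CFDataGetLength(source),
    // per the CFAssert3 in __CFDataInit below.
    CFMutableDataRef fixed = CFDataCreateMutableCopy(kCFAllocatorDefault, CFDataGetLength(source) + 16, source);
    CFRelease(growable);
    CFRelease(fixed);
}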
static void __CFDataDeallocate(CFTypeRef cf) {
    CFMutableDataRef data = (CFMutableDataRef)cf;
    if (!__CFDataBytesInline(data)) {
        CFAllocatorRef deallocator = data->_bytesDeallocator;
        if (deallocator != NULL) {
            // No-copy data: hand the bytes back to the deallocator supplied at
            // creation, then drop the retain taken on it in __CFDataInit.
            _CFAllocatorDeallocateGC(deallocator, data->_bytes);
            if (!_CFAllocatorIsGCRefZero(deallocator)) CFRelease(deallocator);
            data->_bytes = NULL;
        } else {
            // Copied data: the bytes came either from the data's own allocator
            // or from plain malloc; collectable memory needs no explicit free.
            if (__CFDataUseAllocator(data)) {
                _CFAllocatorDeallocateGC(__CFGetAllocator(data), data->_bytes);
            } else if (!__CFDataAllocatesCollectable(data) && data->_bytes) {
                free(data->_bytes);
            }
            data->_bytes = NULL;
        }
    }
}
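// Illustrative sketch (hypothetical helper, not in the original source): how
// _bytesDeallocator gets populated in the first place. CFDataCreateWithBytesNoCopy()
// stores the deallocator that the branch above consumes; kCFAllocatorMalloc means
// "free() the bytes at deallocation time", kCFAllocatorNull means "never free them".
static void __CFDataNoCopyDeallocatorSketch(void) {
    uint8_t *heapBytes = (uint8_t *)malloc(32);
    // CFData takes ownership: __CFDataDeallocate will return heapBytes to
    // kCFAllocatorMalloc (i.e., free it) when the last reference goes away.
    CFDataRef owned = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, heapBytes, 32, kCFAllocatorMalloc);
    CFRelease(owned);

    static const uint8_t constBytes[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
    // kCFAllocatorNull: the bytes outlive the CFData and CF never frees them.
    CFDataRef borrowed = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, constBytes, 4, kCFAllocatorNull);
    CFRelease(borrowed);
}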
// NULL bytesDeallocator to this function does not mean the default allocator, it means
// that there should be no deallocator, and the bytes should be copied.
static CFMutableDataRef __CFDataInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex length, CFAllocatorRef bytesDeallocator) {
    CFMutableDataRef memory;
    __CFGenericValidateMutabilityFlags(flags);
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert3(kCFFixedMutable != __CFMutableVarietyFromFlags(flags) || length <= capacity, __kCFLogAssertion, "%s(): for kCFFixedMutable type, capacity (%d) must be greater than or equal to number of initial elements (%d)", __PRETTY_FUNCTION__, capacity, length);
    CFAssert2(0 <= length, __kCFLogAssertion, "%s(): length (%d) cannot be less than zero", __PRETTY_FUNCTION__, length);

    Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
    Boolean noCopy = bytesDeallocator != NULL;
    Boolean isMutable = ((flags & __kCFMutable) != 0);
    Boolean isGrowable = ((flags & __kCFGrowable) != 0);
    Boolean allocateInline = !isGrowable && !noCopy && capacity < INLINE_BYTES_THRESHOLD;
    allocator = (allocator == NULL) ? __CFGetDefaultAllocator() : allocator;
    Boolean useAllocator = (allocator != kCFAllocatorSystemDefault && allocator != kCFAllocatorMalloc && allocator != kCFAllocatorMallocZone);

    CFIndex size = sizeof(struct __CFData) - sizeof(CFRuntimeBase);
    if (allocateInline) {
        size += sizeof(uint8_t) * __CFDataNumBytesForCapacity(capacity) + sizeof(uint8_t) * 15;  // for 16-byte alignment fixup
    }
    memory = (CFMutableDataRef)_CFRuntimeCreateInstance(allocator, __kCFDataTypeID, size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    __CFDataSetNumBytesUsed(memory, 0);
    __CFDataSetLength(memory, 0);
    __CFDataSetInfoBits(memory,
                        (allocateInline ? __kCFBytesInline : 0) |
                        (useAllocator ? __kCFUseAllocator : 0) |
                        (collectableMemory ? __kCFAllocatesCollectable : 0));

    BOOL finalize = YES;
    BOOL scan = YES;
    if (collectableMemory) {
        if (allocateInline) {
            // We have no pointer to anything that needs to be reclaimed, so don't scan or finalize.
            scan = NO;
            finalize = NO;
        } else if (noCopy) {
            if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator)) {
                // We're taking responsibility for externally GC-allocated memory, so scan us, but we don't need to finalize.
                finalize = NO;
            } else if (bytesDeallocator == kCFAllocatorNull) {
                // We don't have responsibility for these bytes, so there's no need to be scanned and we don't need to finalize.
                scan = NO;
                finalize = NO;
            } else {
                // We have a pointer to non-GC-allocated memory, so don't scan, but do finalize.
                scan = NO;
            }
        }
        if (!scan) auto_zone_set_unscanned(objc_collectableZone(), memory);
        if (!finalize) auto_zone_set_nofinalize(objc_collectableZone(), memory);
    }
    if (isMutable && isGrowable) {
        __CFDataSetCapacity(memory, __CFDataRoundUpCapacity(1));
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(__CFDataRoundUpCapacity(1)));
        __CFSetMutableVariety(memory, kCFMutable);
    } else {
        /* Don't round up capacity */
        __CFDataSetCapacity(memory, capacity);
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(capacity));
        __CFSetMutableVariety(memory, kCFFixedMutable);
    }
    if (noCopy) {
        __CFAssignWithWriteBarrier((void **)&memory->_bytes, (uint8_t *)bytes);
        if (finalize) {
            if (_CFAllocatorIsGCRefZero(bytesDeallocator)) {
                memory->_bytesDeallocator = bytesDeallocator;
            } else {
                memory->_bytesDeallocator = (CFAllocatorRef)CFRetain(_CFConvertAllocatorToNonGCRefZeroEquivalent(bytesDeallocator));
            }
        }
        if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator) && !_CFAllocatorIsGCRefZero(bytesDeallocator)) {
            // When given a GC allocator which is not one of the GCRefZero ones as the deallocator, we assume
            // that the no-copy memory is GC-allocated with a retain count of (at least) 1 and we should
            // release it now instead of waiting until __CFDataDeallocate.
            auto_zone_release(objc_collectableZone(), memory->_bytes);
        }
        __CFDataSetNumBytesUsed(memory, length);
        __CFDataSetLength(memory, length);
        // Mutable no-copy datas are not allowed, so don't bother setting needsToZero flag.
    } else {
        Boolean cleared = (isMutable && !isGrowable && !_CFExecutableLinkedOnOrAfter(CFSystemVersionSnowLeopard));
        if (!allocateInline) {
            // assume that allocators give 16-byte aligned memory back -- it is their responsibility
            __CFAssignWithWriteBarrier((void **)&memory->_bytes, __CFDataAllocate(memory, __CFDataNumBytes(memory) * sizeof(uint8_t), cleared));
            if (__CFOASafe) __CFSetLastAllocationEventName(memory->_bytes, "CFData (store)");
            if (NULL == memory->_bytes) {
                CFRelease(memory);
                return NULL;
            }
        } else {
            if (length == 0 && !isMutable) {
                // NSData sets its bytes pointer to NULL when its length is zero. Starting in 10.7 we do the same for CFData.
                memory->_bytes = NULL;
                // It is important to set this data as not inlined, so we do not recalculate a bytes pointer from null.
                __CFDataSetInline(memory, false);
            }
            cleared = true;
        }
        __CFDataSetNeedsToZero(memory, !cleared);
        memory->_bytesDeallocator = NULL;
        CFDataReplaceBytes(memory, CFRangeMake(0, 0), bytes, length);
    }
    __CFSetMutableVariety(memory, __CFMutableVarietyFromFlags(flags));
    return memory;
}
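// Illustrative map (hypothetical helper, not part of CF) of how the public
// creation functions drive the branches in __CFDataInit above: a NULL
// bytesDeallocator selects the copying path, a non-NULL one the no-copy path,
// and the mutability flags select between fixed and growable storage.
static void __CFDataInitEntryPointsSketch(void) {
    static const uint8_t bytes[8] = { 0 };
    // Copying path (noCopy == false): bytes are duplicated, possibly inline
    // in the CFData object itself for small capacities.
    CFDataRef copied = CFDataCreate(kCFAllocatorDefault, bytes, sizeof(bytes));
    // No-copy path (noCopy == true): _bytes aliases the caller's buffer;
    // kCFAllocatorNull records that CF must never free it.
    CFDataRef aliased = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, bytes, sizeof(bytes), kCFAllocatorNull);
    // Growable mutable path (isMutable && isGrowable): capacity 0 requests unbounded growth.
    CFMutableDataRef growable = CFDataCreateMutable(kCFAllocatorDefault, 0);
    CFRelease(copied);
    CFRelease(aliased);
    CFRelease(growable);
}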