/* Replaces the value at `idx`, or appends when idx == count.
 *
 * Bridged (ObjC) arrays are forwarded to -[NSMutableArray setObject:atIndex:]
 * via CF_OBJC_FUNCDISPATCHV before any CF-side validation runs.
 * The index assert allows idx == count: that case is routed through
 * _CFArrayReplaceValues with a zero-length range, i.e. an insert/append.
 * For an in-place replace, note the ordering: the new value is retained
 * BEFORE the old one is released, so the function is safe even when the new
 * value is the same object as the old one (release-first could deallocate it).
 * The store goes through __CFAssignWithWriteBarrier for GC; retain/release
 * callbacks are skipped once the array has been finalized (GC teardown).
 * `_mutations++` bumps the mutation counter — presumably consumed by
 * CHECK_FOR_MUTATION to detect mutation during enumeration (TODO confirm,
 * the macro body is not visible here).
 */
void CFArraySetValueAtIndex(CFMutableArrayRef array, CFIndex idx, const void *value) { CF_OBJC_FUNCDISPATCHV(__kCFArrayTypeID, void, (NSMutableArray *)array, setObject:(id)value atIndex:(NSUInteger)idx); __CFGenericValidateType(array, __kCFArrayTypeID); CFAssert1(__CFArrayGetType(array) != __kCFArrayImmutable, __kCFLogAssertion, "%s(): array is immutable", __PRETTY_FUNCTION__); CFAssert2(0 <= idx && idx <= __CFArrayGetCount(array), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx); CHECK_FOR_MUTATION(array); if (idx == __CFArrayGetCount(array)) { _CFArrayReplaceValues(array, CFRangeMake(idx, 0), &value, 1); } else { BEGIN_MUTATION(array); const void *old_value; const CFArrayCallBacks *cb = __CFArrayGetCallBacks(array); CFAllocatorRef allocator = __CFGetAllocator(array); struct __CFArrayBucket *bucket = __CFArrayGetBucketAtIndex(array, idx); if (NULL != cb->retain && !hasBeenFinalized(array)) { value = (void *)INVOKE_CALLBACK2(cb->retain, allocator, value); } old_value = bucket->_item; __CFAssignWithWriteBarrier((void **)&bucket->_item, (void *)value); // GC: handles deque/CFStorage cases. if (NULL != cb->release && !hasBeenFinalized(array)) { INVOKE_CALLBACK2(cb->release, allocator, old_value); } array->_mutations++; END_MUTATION(array); } }
/* Creates an immutable copy of `array`.
 *
 * Bridged (ObjC) source arrays get kCFTypeArrayCallBacks; otherwise the
 * source's own callbacks are used to size/initialize the new array. After
 * __CFArrayInit, `cb` is re-fetched from the RESULT so the retains performed
 * below match the releases the new array will perform on deallocation.
 * Each value is read through CFArrayGetValueAtIndex (so ObjC dispatch works),
 * retained via the callback if present, and stored with a GC write barrier.
 * The count is only published at the end, once every bucket is filled.
 * NOTE(review): `bucketsBase` is computed (auto_zone base pointer for
 * collectable memory) but never read afterwards in this version — it looks
 * vestigial; confirm against the write-barrier macros before removing.
 */
__private_extern__ CFArrayRef __CFArrayCreateCopy0(CFAllocatorRef allocator, CFArrayRef array) { CFArrayRef result; const CFArrayCallBacks *cb; struct __CFArrayBucket *buckets; CFAllocatorRef bucketsAllocator; void* bucketsBase; CFIndex numValues = CFArrayGetCount(array); CFIndex idx; if (CF_IS_OBJC(__kCFArrayTypeID, array)) { cb = &kCFTypeArrayCallBacks; } else { cb = __CFArrayGetCallBacks(array); } result = __CFArrayInit(allocator, __kCFArrayImmutable, numValues, cb); cb = __CFArrayGetCallBacks(result); // GC: use the new array's callbacks so we don't leak. buckets = __CFArrayGetBucketsPtr(result); bucketsAllocator = isStrongMemory(result) ? allocator : kCFAllocatorNull; bucketsBase = CF_IS_COLLECTABLE_ALLOCATOR(bucketsAllocator) ? (void *)auto_zone_base_pointer(objc_collectableZone(), buckets) : NULL; for (idx = 0; idx < numValues; idx++) { const void *value = CFArrayGetValueAtIndex(array, idx); if (NULL != cb->retain) { value = (void *)INVOKE_CALLBACK2(cb->retain, allocator, value); } __CFAssignWithWriteBarrier((void **)&buckets->_item, (void *)value); buckets++; } __CFArraySetCount(result, numValues); return result; }
/* Creates an immutable array holding `numValues` values copied from the
 * C array `values`, using `callBacks` (NULL means the default CFType set,
 * resolved inside __CFArrayInit).
 *
 * The retain-callback check is hoisted out of the copy loop: one loop retains
 * each value as it is stored, the other stores raw pointers — both store via
 * __CFAssignWithWriteBarrier for GC correctness. The count is published last.
 * NOTE(review): as in __CFArrayCreateCopy0, `bucketsBase` is computed but
 * unused here — likely vestigial GC plumbing; verify before cleanup.
 */
__private_extern__ CFArrayRef __CFArrayCreate0(CFAllocatorRef allocator, const void **values, CFIndex numValues, const CFArrayCallBacks *callBacks) { CFArrayRef result; const CFArrayCallBacks *cb; struct __CFArrayBucket *buckets; CFAllocatorRef bucketsAllocator; void* bucketsBase; CFIndex idx; CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues); result = __CFArrayInit(allocator, __kCFArrayImmutable, numValues, callBacks); cb = __CFArrayGetCallBacks(result); buckets = __CFArrayGetBucketsPtr(result); bucketsAllocator = isStrongMemory(result) ? allocator : kCFAllocatorNull; bucketsBase = CF_IS_COLLECTABLE_ALLOCATOR(bucketsAllocator) ? (void *)auto_zone_base_pointer(objc_collectableZone(), buckets) : NULL; if (NULL != cb->retain) { for (idx = 0; idx < numValues; idx++) { __CFAssignWithWriteBarrier((void **)&buckets->_item, (void *)INVOKE_CALLBACK2(cb->retain, allocator, *values)); values++; buckets++; } } else { for (idx = 0; idx < numValues; idx++) { __CFAssignWithWriteBarrier((void **)&buckets->_item, (void *)*values); values++; buckets++; } } __CFArraySetCount(result, numValues); return result; }
/* Frees `ptr` using `allocator`'s deallocate callback (GC-era variant).
 *
 * Allocator normalization first: the GCRefZero pseudo-allocators are mapped
 * to real allocators, and the function returns early (no free) when the
 * memory is garbage-collected — the collector owns it. NULL maps to the
 * thread's default allocator.
 * If the allocator's isa is NOT the CFAllocator class, the pointer is treated
 * as a malloc_zone_t* and freed with malloc_zone_free; DEBUG builds scribble
 * 0xCC over the block first to catch use-after-free.
 * Otherwise the context's deallocate callback is invoked; a NULL ptr or a
 * NULL callback makes this a no-op (freeing NULL is legal).
 */
void CFAllocatorDeallocate(CFAllocatorRef allocator, void *ptr) { CFAllocatorDeallocateCallBack deallocateFunc; if (kCFAllocatorSystemDefaultGCRefZero == allocator) { if (_CFAllocatorIsGCRefZero(allocator)) return; allocator = kCFAllocatorSystemDefault; } else if (kCFAllocatorDefaultGCRefZero == allocator) { // Under GC, we can't use just any old allocator when the GCRefZero allocator was requested allocator = kCFUseCollectableAllocator ? kCFAllocatorSystemDefault : __CFGetDefaultAllocator(); if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) return; } else if (NULL == allocator) { allocator = __CFGetDefaultAllocator(); } #if defined(DEBUG) && (DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI) if (allocator->_base._cfisa == __CFISAForTypeID(__kCFAllocatorTypeID)) { __CFGenericValidateType(allocator, __kCFAllocatorTypeID); } #else __CFGenericValidateType(allocator, __kCFAllocatorTypeID); #endif #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForTypeID(__kCFAllocatorTypeID)) { // malloc_zone_t * #if defined(DEBUG) size_t size = malloc_size(ptr); if (size) memset(ptr, 0xCC, size); #endif return malloc_zone_free((malloc_zone_t *)allocator, ptr); } #endif deallocateFunc = __CFAllocatorGetDeallocateFunction(&allocator->_context); if (NULL != ptr && NULL != deallocateFunc) { INVOKE_CALLBACK2(deallocateFunc, ptr, allocator->_context.info); } }
/* Builds the debug description string for a CFSet: a header with the set's
 * address, allocator, count and capacity, followed by one line per occupied
 * bucket. Each value's line uses the callbacks' copyDescription if one is
 * provided (and it returned non-NULL); otherwise the raw pointer is printed.
 * The caller owns the returned mutable string. */
static CFStringRef __CFSetCopyDescription(CFTypeRef cf) {
    CFSetRef set = (CFSetRef)cf;
    const CFSetCallBacks *cb = __CFSetGetCallBacks(set);
    const struct __CFSetBucket *buckets = set->_buckets;
    CFIndex nbuckets = set->_bucketsNum;
    CFMutableStringRef result = CFStringCreateMutable(kCFAllocatorSystemDefault, 0);
    CFStringAppendFormat(result, NULL, CFSTR("<CFSet %p [%p]>{count = %u, capacity = %u, values = (\n"), set, CFGetAllocator(set), set->_count, set->_capacity);
    for (CFIndex idx = 0; idx < nbuckets; idx++) {
        const struct __CFSetBucket *bucket = &buckets[idx];
        if (!__CFSetBucketIsOccupied(set, bucket)) continue;
        CFStringRef desc = NULL;
        if (NULL != cb->copyDescription) {
            desc = (CFStringRef)INVOKE_CALLBACK2(((CFStringRef (*)(const void *, void *))cb->copyDescription), bucket->_key, set->_context);
        }
        if (NULL != desc) {
            CFStringAppendFormat(result, NULL, CFSTR("\t%u : %@\n"), idx, desc, NULL);
            CFRelease(desc);
        } else {
            CFStringAppendFormat(result, NULL, CFSTR("\t%u : <%p>\n"), idx, bucket->_key, NULL);
        }
    }
    CFStringAppend(result, CFSTR(")}"));
    return result;
}
/* Frees `ptr` using `allocator`'s deallocate callback (modern, non-GC
 * variant; note the _kCFRuntimeIDCFAllocator / __CFISAForCFAllocator names
 * versus the __kCFAllocatorTypeID spelling in the older variant above).
 *
 * NULL allocator maps to the thread's default allocator. An allocator whose
 * isa is not the CFAllocator class is interpreted as a raw malloc_zone_t*
 * and freed with malloc_zone_free; DEBUG builds scribble 0xCC over the block
 * first to surface use-after-free. Otherwise the context's deallocate
 * callback runs; NULL ptr or NULL callback is a silent no-op.
 */
void CFAllocatorDeallocate(CFAllocatorRef allocator, void *ptr) { CFAllocatorDeallocateCallBack deallocateFunc; if (NULL == allocator) { allocator = __CFGetDefaultAllocator(); } #if defined(DEBUG) && (DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI) if (allocator->_base._cfisa == __CFISAForCFAllocator()) { __CFGenericValidateType(allocator, _kCFRuntimeIDCFAllocator); } #else __CFGenericValidateType(allocator, _kCFRuntimeIDCFAllocator); #endif #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForCFAllocator()) { // malloc_zone_t * #if defined(DEBUG) size_t size = malloc_size(ptr); if (size) memset(ptr, 0xCC, size); #endif return malloc_zone_free((malloc_zone_t *)allocator, ptr); } #endif deallocateFunc = __CFAllocatorGetDeallocateFunction(&allocator->_context); if (NULL != ptr && NULL != deallocateFunc) { INVOKE_CALLBACK2(deallocateFunc, ptr, allocator->_context.info); } }
/* realloc(3)-style entry point with CF semantics, dispatching on the
 * (ptr, newsize) pair:
 *   - ptr == NULL, newsize > 0  -> plain allocation via the allocate callback
 *                                  (returns NULL if there is no callback);
 *   - ptr != NULL, newsize == 0 -> deallocation via the deallocate callback,
 *                                  always returns NULL;
 *   - both NULL/0               -> no-op, returns NULL;
 *   - otherwise                 -> the reallocate callback, or NULL if the
 *                                  allocator has none (NOTE: the original
 *                                  block is NOT freed in that failure case).
 * In every branch, an allocator whose isa is not the CFAllocator class is
 * treated as a raw malloc_zone_t* and serviced by the corresponding
 * malloc_zone_* call; DEBUG builds scribble 0xCC before zone-freeing.
 * `hint` is forwarded untouched to the allocate/reallocate callbacks.
 */
void *CFAllocatorReallocate(CFAllocatorRef allocator, void *ptr, CFIndex newsize, CFOptionFlags hint) { CFAllocatorAllocateCallBack allocateFunc; CFAllocatorReallocateCallBack reallocateFunc; CFAllocatorDeallocateCallBack deallocateFunc; void *newptr; if (NULL == allocator) { allocator = __CFGetDefaultAllocator(); } #if defined(DEBUG) && (DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI) if (allocator->_base._cfisa == __CFISAForCFAllocator()) { __CFGenericValidateType(allocator, _kCFRuntimeIDCFAllocator); } #else __CFGenericValidateType(allocator, _kCFRuntimeIDCFAllocator); #endif if (NULL == ptr && 0 < newsize) { #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForCFAllocator()) { // malloc_zone_t * return malloc_zone_malloc((malloc_zone_t *)allocator, newsize); } #endif newptr = NULL; allocateFunc = __CFAllocatorGetAllocateFunction(&allocator->_context); if (allocateFunc) { newptr = (void *)INVOKE_CALLBACK3(allocateFunc, newsize, hint, allocator->_context.info); } return newptr; } if (NULL != ptr && 0 == newsize) { #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForCFAllocator()) { // malloc_zone_t * #if defined(DEBUG) size_t size = malloc_size(ptr); if (size) memset(ptr, 0xCC, size); #endif malloc_zone_free((malloc_zone_t *)allocator, ptr); return NULL; } #endif deallocateFunc = __CFAllocatorGetDeallocateFunction(&allocator->_context); if (NULL != deallocateFunc) { INVOKE_CALLBACK2(deallocateFunc, ptr, allocator->_context.info); } return NULL; } if (NULL == ptr && 0 == newsize) return NULL; #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForCFAllocator()) { // malloc_zone_t * return malloc_zone_realloc((malloc_zone_t *)allocator, ptr, newsize); } #endif reallocateFunc = 
__CFAllocatorGetReallocateFunction(&allocator->_context); if (NULL == reallocateFunc) return NULL; newptr = (void *)INVOKE_CALLBACK4(reallocateFunc, ptr, newsize, hint, allocator->_context.info); return newptr; }
/* Releases the values in `range` (GC-aware variant), and — for deque-backed
 * arrays — optionally frees the backing store when the whole array is being
 * emptied (releaseStorageIfPossible && range covers everything).
 *
 * Finalized arrays skip all the work: per the original comment, finalization
 * implies standard callbacks on GC memory, so slots need no zeroing and the
 * collector handles the references. Otherwise each slot is released through
 * the callback (if any) and then NULLed to break the strong reference for GC.
 * When dropping the whole store, the deque is only explicitly deallocated
 * under a non-collectable allocator; count is zeroed and _store set to NULL
 * to maintain the documented invariant (_count == 0 ==> _store == NULL).
 */
static void __CFArrayReleaseValues(CFArrayRef array, CFRange range, bool releaseStorageIfPossible) { const CFArrayCallBacks *cb = __CFArrayGetCallBacks(array); CFAllocatorRef allocator; CFIndex idx; switch (__CFArrayGetType(array)) { case __kCFArrayImmutable: if (NULL != cb->release && 0 < range.length && !hasBeenFinalized(array)) { // if we've been finalized then we know that // 1) we're using the standard callback on GC memory // 2) the slots don't' need to be zeroed struct __CFArrayBucket *buckets = __CFArrayGetBucketsPtr(array); allocator = __CFGetAllocator(array); for (idx = 0; idx < range.length; idx++) { INVOKE_CALLBACK2(cb->release, allocator, buckets[idx + range.location]._item); buckets[idx + range.location]._item = NULL; // GC: break strong reference. } } break; case __kCFArrayDeque: { struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store; if (0 < range.length && NULL != deque && !hasBeenFinalized(array)) { struct __CFArrayBucket *buckets = __CFArrayGetBucketsPtr(array); if (NULL != cb->release) { allocator = __CFGetAllocator(array); for (idx = 0; idx < range.length; idx++) { INVOKE_CALLBACK2(cb->release, allocator, buckets[idx + range.location]._item); buckets[idx + range.location]._item = NULL; // GC: break strong reference. } } else { for (idx = 0; idx < range.length; idx++) { buckets[idx + range.location]._item = NULL; // GC: break strong reference. } } } if (releaseStorageIfPossible && 0 == range.location && __CFArrayGetCount(array) == range.length) { allocator = __CFGetAllocator(array); if (NULL != deque) if (!CF_IS_COLLECTABLE_ALLOCATOR(allocator)) CFAllocatorDeallocate(allocator, deque); __CFArraySetCount(array, 0); // GC: _count == 0 ==> _store == NULL. ((struct __CFArray *)array)->_store = NULL; } break; } } }
/* Invokes applier(value, context) once per value in `range`, in ascending
 * index order. `applier` must be non-NULL (asserted). Values are fetched
 * through CFArrayGetValueAtIndex, so bridged ObjC arrays work too. */
void CFArrayApplyFunction(CFArrayRef array, CFRange range, CFArrayApplierFunction applier, void *context) {
    FAULT_CALLBACK((void **)&(applier));
    __CFGenericValidateType(array, __kCFArrayTypeID);
    __CFArrayValidateRange(array, range, __PRETTY_FUNCTION__);
    CFAssert1(NULL != applier, __kCFLogAssertion, "%s(): pointer to applier function may not be NULL", __PRETTY_FUNCTION__);
    CHECK_FOR_MUTATION(array);
    for (CFIndex offset = 0; offset < range.length; offset++) {
        const void *value = CFArrayGetValueAtIndex(array, range.location + offset);
        INVOKE_CALLBACK2(applier, value, context);
    }
}
/* Scans `range` backwards and returns the absolute index of the last value
 * matching `value` (pointer identity first, then the equal callback if one
 * exists), or kCFNotFound. Bridged arrays use kCFTypeArrayCallBacks. */
CFIndex CFArrayGetLastIndexOfValue(CFArrayRef array, CFRange range, const void *value) {
    __CFGenericValidateType(array, __kCFArrayTypeID);
    __CFArrayValidateRange(array, range, __PRETTY_FUNCTION__);
    CHECK_FOR_MUTATION(array);
    const CFArrayCallBacks *callbacks = CF_IS_OBJC(CFArrayGetTypeID(), array) ? &kCFTypeArrayCallBacks : __CFArrayGetCallBacks(array);
    CFIndex offset = range.length;
    while (offset-- > 0) {
        const void *candidate = CFArrayGetValueAtIndex(array, range.location + offset);
        if (candidate == value) return range.location + offset;
        if (callbacks->equal && INVOKE_CALLBACK2(callbacks->equal, value, candidate)) return range.location + offset;
    }
    return kCFNotFound;
}
/* Returns true if any value in `range` matches `value`, checking pointer
 * identity first and then the equal callback (if present). Bridged arrays
 * use kCFTypeArrayCallBacks. */
Boolean CFArrayContainsValue(CFArrayRef array, CFRange range, const void *value) {
    __CFGenericValidateType(array, __kCFArrayTypeID);
    __CFArrayValidateRange(array, range, __PRETTY_FUNCTION__);
    CHECK_FOR_MUTATION(array);
    const CFArrayCallBacks *callbacks = CF_IS_OBJC(CFArrayGetTypeID(), array) ? &kCFTypeArrayCallBacks : __CFArrayGetCallBacks(array);
    for (CFIndex offset = 0; offset < range.length; offset++) {
        const void *candidate = CFArrayGetValueAtIndex(array, range.location + offset);
        if (candidate == value) return true;
        if (callbacks->equal && INVOKE_CALLBACK2(callbacks->equal, value, candidate)) return true;
    }
    return false;
}
/* Releases the values in `range` (non-GC variant) and — for deque-backed
 * arrays — optionally drops the backing store when the whole content is
 * being removed (releaseStorageIfPossible && range covers everything).
 *
 * Ordering matters: every slot in the range is released through the
 * callback (if any) FIRST, then the whole range of buckets is cleared with
 * a single memset, so a reentrant read during release never sees a freed
 * pointer left behind after the loop completes. When the store is dropped,
 * the deque is deallocated, the count zeroed, and _store NULLed — keeping
 * the empty-array invariant (_count == 0 with no store).
 */
static void __CFArrayReleaseValues(CFArrayRef array, CFRange range, bool releaseStorageIfPossible) { const CFArrayCallBacks *cb = __CFArrayGetCallBacks(array); CFAllocatorRef allocator; CFIndex idx; switch (__CFArrayGetType(array)) { case __kCFArrayImmutable: if (NULL != cb->release && 0 < range.length) { struct __CFArrayBucket *buckets = __CFArrayGetBucketsPtr(array); allocator = __CFGetAllocator(array); for (idx = 0; idx < range.length; idx++) { INVOKE_CALLBACK2(cb->release, allocator, buckets[idx + range.location]._item); } memset(buckets + range.location, 0, sizeof(struct __CFArrayBucket) * range.length); } break; case __kCFArrayDeque: { struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store; if (0 < range.length && NULL != deque) { struct __CFArrayBucket *buckets = __CFArrayGetBucketsPtr(array); if (NULL != cb->release) { allocator = __CFGetAllocator(array); for (idx = 0; idx < range.length; idx++) { INVOKE_CALLBACK2(cb->release, allocator, buckets[idx + range.location]._item); } } memset(buckets + range.location, 0, sizeof(struct __CFArrayBucket) * range.length); } if (releaseStorageIfPossible && 0 == range.location && __CFArrayGetCount(array) == range.length) { allocator = __CFGetAllocator(array); if (NULL != deque) CFAllocatorDeallocate(allocator, deque); __CFArraySetCount(array, 0); ((struct __CFArray *)array)->_store = NULL; } break; } } }
/* Invokes applier(key, context) once for each occupied bucket in the set.
 * Bridged sets are forwarded to the ObjC _applyValues:context: method.
 * (The original wrote the invocation as `for (cnt = 1; cnt--;)` — a loop
 * that runs exactly once, apparently shared boilerplate with the CFBag
 * multi-count code path; a direct call is behaviorally identical here.) */
void CFSetApplyFunction(CFSetRef set, CFSetApplierFunction applier, void *context) {
    struct __CFSetBucket *buckets;
    CFIndex idx, nbuckets;
    FAULT_CALLBACK((void **)&(applier));
    CF_OBJC_FUNCDISPATCH2(__kCFSetTypeID, void, set, "_applyValues:context:", applier, context);
    __CFGenericValidateType(set, __kCFSetTypeID);
    buckets = set->_buckets;
    nbuckets = set->_bucketsNum;
    for (idx = 0; idx < nbuckets; idx++) {
        if (__CFSetBucketIsOccupied(set, &buckets[idx])) {
            INVOKE_CALLBACK2(applier, buckets[idx]._key, context);
        }
    }
}
/* Asks a plug-in factory to create an instance for `typeID`.
 * Disabled factories log and return NULL. The factory function pointer is
 * resolved lazily from the plug-in bundle on first use (with a log message
 * if the lookup fails) and cached on the factory for subsequent calls. */
__private_extern__ void *_CFPFactoryCreateInstance(CFAllocatorRef allocator, _CFPFactory *factory, CFUUIDRef typeID) {
    void *instance = NULL;
    if (!factory->_enabled) {
        CFLog(__kCFLogPlugIn, CFSTR("Factory %@ is disabled"), factory->_uuid);
        return instance;
    }
    if (!factory->_func) {
        // Lazy lookup; cache the resolved pointer on the factory.
        factory->_func = (CFPlugInFactoryFunction)CFBundleGetFunctionPointerForName(factory->_plugIn, factory->_funcName);
        if (!factory->_func) CFLog(__kCFLogPlugIn, CFSTR("Cannot find function pointer %@ for factory %@ in %@"), factory->_funcName, factory->_uuid, factory->_plugIn);
    }
    if (factory->_func) {
        FAULT_CALLBACK((void **)&(factory->_func));
        instance = (void *)INVOKE_CALLBACK2(factory->_func, allocator, typeID);
    }
    return instance;
}
/* Tears down a CFAllocator object itself.
 *
 * Two cases, distinguished by how the allocator was created:
 *  - kCFAllocatorUseContext: the allocator's own context callbacks allocated
 *    its storage, so teardown runs in REVERSE of creation order (the
 *    original's "chicken and egg" note): deallocate self through the
 *    context's deallocate callback first, then release the context info.
 *  - otherwise: release the context info, then free self via the allocator
 *    it was created from.
 * Both release-callback invocations are guarded against a NULL callback.
 */
CF_PRIVATE void __CFAllocatorDeallocate(CFTypeRef cf) { CFAllocatorRef self = (CFAllocatorRef)cf; CFAllocatorRef allocator = self->_allocator; CFAllocatorReleaseCallBack releaseFunc = __CFAllocatorGetReleaseFunction(&self->_context); if (kCFAllocatorUseContext == allocator) { /* Rather a chicken and egg problem here, so we do things in the reverse order from what was done at create time. */ CFAllocatorDeallocateCallBack deallocateFunc = __CFAllocatorGetDeallocateFunction(&self->_context); void *info = self->_context.info; if (NULL != deallocateFunc) { INVOKE_CALLBACK2(deallocateFunc, (void *)self, info); } if (NULL != releaseFunc) { INVOKE_CALLBACK1(releaseFunc, info); } } else { if (NULL != releaseFunc) { INVOKE_CALLBACK1(releaseFunc, self->_context.info); } CFAllocatorDeallocate(allocator, (void *)self); } }
/* Open-addressing lookup: finds the bucket holding `key`, writing it to
 * *match (or leaving *match NULL if the key is absent).
 *
 * The hash comes from the hash callback when present, else the pointer value
 * itself. Probing starts at hash % bucketsNum and advances linearly
 * (probeskip is fixed at 1 in this variant). An EMPTY bucket terminates the
 * search — the key cannot be further along; a DELETED bucket is skipped so
 * probe chains survive removals. A match is pointer identity or the equal
 * callback returning true. Wrapping back to `start` means the table was
 * fully probed without finding an empty slot — give up to avoid spinning.
 */
static void __CFSetFindBuckets1(CFSetRef set, const void *key, struct __CFSetBucket **match) { const CFSetCallBacks *cb = __CFSetGetCallBacks(set); struct __CFSetBucket *buckets = set->_buckets; CFHashCode keyHash = cb->hash ? (CFHashCode)INVOKE_CALLBACK2(((CFHashCode (*)(const void *, void *))cb->hash), key, set->_context) : (CFHashCode)key; UInt32 start = keyHash % set->_bucketsNum; UInt32 probe = start; UInt32 probeskip = 1; *match = NULL; for (;;) { struct __CFSetBucket *currentBucket = buckets + probe; if (__CFSetBucketIsEmpty(set, currentBucket)) { return; } else if (__CFSetBucketIsDeleted(set, currentBucket)) { /* do nothing */ } else if (currentBucket->_key == key || (cb->equal && INVOKE_CALLBACK3((Boolean (*)(void *, void *, void*))cb->equal, currentBucket->_key, key, set->_context))) { *match = currentBucket; return; } probe = (probe + probeskip) % set->_bucketsNum; if (start == probe) return; } }
/* Equality callback for CFArray. Two arrays are equal when: they are the
 * same object; or they have the same count AND the same equal callback, and
 * every pair of elements is pointer-identical or deemed equal by that
 * callback. The callback comparison happens before the empty-array
 * short-circuit on purpose: arrays with different callbacks are never
 * equal, even when both are empty. */
static Boolean __CFArrayEqual(CFTypeRef cf1, CFTypeRef cf2) {
    CFArrayRef left = (CFArrayRef)cf1;
    CFArrayRef right = (CFArrayRef)cf2;
    if (left == right) return true;
    CFIndex count = __CFArrayGetCount(left);
    if (count != __CFArrayGetCount(right)) return false;
    const CFArrayCallBacks *leftCB = __CFArrayGetCallBacks(left);
    const CFArrayCallBacks *rightCB = __CFArrayGetCallBacks(right);
    if (leftCB->equal != rightCB->equal) return false;
    if (0 == count) return true;
    for (CFIndex i = 0; i < count; i++) {
        const void *a = __CFArrayGetBucketAtIndex(left, i)->_item;
        const void *b = __CFArrayGetBucketAtIndex(right, i)->_item;
        if (a != b) {
            if (NULL == leftCB->equal) return false;
            if (!INVOKE_CALLBACK2(leftCB->equal, a, b)) return false;
        }
    }
    return true;
}
/* Asks a plug-in factory to create an instance for `typeID`
 * (BINARY_SUPPORT_CFM variant of the same function above).
 *
 * Disabled factories log and return NULL. The factory function is resolved
 * lazily from the bundle and cached. Under BINARY_SUPPORT_CFM the resolved
 * pointer is tagged by OR-ing in the low bit — per the original comment,
 * pointers from CFBundleGetFunctionPointerForName are dyld pointers but CFM
 * glue does not fault correctly, so the tag forces FAULT_CALLBACK to handle
 * it. Do not "clean up" that cast: the bit-twiddling is load-bearing on CFM.
 */
__private_extern__ void *_CFPFactoryCreateInstance(CFAllocatorRef allocator, _CFPFactory *factory, CFUUIDRef typeID) { void *result = NULL; if (factory->_enabled) { if (!factory->_func) { factory->_func = (CFPlugInFactoryFunction)CFBundleGetFunctionPointerForName(factory->_plugIn, factory->_funcName); if (!factory->_func) CFLog(__kCFLogPlugIn, CFSTR("Cannot find function pointer %@ for factory %@ in %@"), factory->_funcName, factory->_uuid, factory->_plugIn); #if BINARY_SUPPORT_CFM if (factory->_func) { // return values from CFBundleGetFunctionPointerForName will always be dyld, but we must force-fault them because pointers to glue code do not fault correctly factory->_func = (void *)((uint32_t)(factory->_func) | 0x1); } #endif /* BINARY_SUPPORT_CFM */ } if (factory->_func) { // UPPGOOP FAULT_CALLBACK((void **)&(factory->_func)); result = (void *)INVOKE_CALLBACK2(factory->_func, allocator, typeID); } } else { CFLog(__kCFLogPlugIn, CFSTR("Factory %@ is disabled"), factory->_uuid); } return result; }
/* Asks a plug-in factory to create an instance for `typeID` (thread-safe
 * variant guarded by factory->_lock).
 *
 * The lock protects the lazy resolution and caching of factory->_func.
 * Crucially, the factory function itself is invoked with the lock DROPPED:
 * the pointer is copied to a local, the spinlock is released, the callback
 * runs (it may re-enter plug-in code or take arbitrary time), and the lock
 * is re-acquired afterwards so the single unlock at the end balances in all
 * paths. Do not reorder — calling out under a spinlock risks deadlock.
 */
CF_PRIVATE void *_CFPFactoryCreateInstance(CFAllocatorRef allocator, _CFPFactoryRef factory, CFUUIDRef typeID) { void *result = NULL; __CFSpinLock(&factory->_lock); if (factory->_enabled) { if (!factory->_func) { factory->_func = (CFPlugInFactoryFunction)CFBundleGetFunctionPointerForName(factory->_plugIn, factory->_funcName); if (!factory->_func) CFLog(__kCFLogPlugIn, CFSTR("Cannot find function pointer %@ for factory %@ in %@"), factory->_funcName, factory->_uuid, factory->_plugIn); } if (factory->_func) { // UPPGOOP CFPlugInFactoryFunction f = factory->_func; __CFSpinUnlock(&factory->_lock); FAULT_CALLBACK((void **)&(f)); result = (void *)INVOKE_CALLBACK2(f, allocator, typeID); __CFSpinLock(&factory->_lock); } } else { CFLog(__kCFLogPlugIn, CFSTR("Factory %@ is disabled"), factory->_uuid); } __CFSpinUnlock(&factory->_lock); return result; }
/* realloc(3)-style entry point (GC-era variant; note the GCRefZero
 * pseudo-allocator normalization at the top and the __kCFAllocatorTypeID
 * spelling, versus the modern variant elsewhere in this file).
 *
 * Dispatch on (ptr, newsize):
 *   - ptr == NULL, newsize > 0  -> allocate via the allocate callback
 *                                  (NULL if the allocator has none);
 *   - ptr != NULL, newsize == 0 -> free via the deallocate callback,
 *                                  returns NULL;
 *   - both NULL/0               -> no-op, NULL;
 *   - otherwise                 -> the reallocate callback, or NULL if
 *                                  absent (the original block is NOT freed
 *                                  in that failure case).
 * Allocators whose isa is not the CFAllocator class are treated as raw
 * malloc_zone_t* and serviced by malloc_zone_malloc/free/realloc; DEBUG
 * builds scribble 0xCC before zone-freeing. `hint` passes through to the
 * allocate/reallocate callbacks untouched.
 */
void *CFAllocatorReallocate(CFAllocatorRef allocator, void *ptr, CFIndex newsize, CFOptionFlags hint) { CFAllocatorAllocateCallBack allocateFunc; CFAllocatorReallocateCallBack reallocateFunc; CFAllocatorDeallocateCallBack deallocateFunc; void *newptr; if (kCFAllocatorSystemDefaultGCRefZero == allocator) { allocator = kCFAllocatorSystemDefault; } else if (kCFAllocatorDefaultGCRefZero == allocator) { // Under GC, we can't use just any old allocator when the GCRefZero allocator was requested allocator = kCFUseCollectableAllocator ? kCFAllocatorSystemDefault : __CFGetDefaultAllocator(); } else if (NULL == allocator) { allocator = __CFGetDefaultAllocator(); } #if defined(DEBUG) && (DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI) if (allocator->_base._cfisa == __CFISAForTypeID(__kCFAllocatorTypeID)) { __CFGenericValidateType(allocator, __kCFAllocatorTypeID); } #else __CFGenericValidateType(allocator, __kCFAllocatorTypeID); #endif if (NULL == ptr && 0 < newsize) { #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForTypeID(__kCFAllocatorTypeID)) { // malloc_zone_t * return malloc_zone_malloc((malloc_zone_t *)allocator, newsize); } #endif newptr = NULL; allocateFunc = __CFAllocatorGetAllocateFunction(&allocator->_context); if (allocateFunc) { newptr = (void *)INVOKE_CALLBACK3(allocateFunc, newsize, hint, allocator->_context.info); } return newptr; } if (NULL != ptr && 0 == newsize) { #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForTypeID(__kCFAllocatorTypeID)) { // malloc_zone_t * #if defined(DEBUG) size_t size = malloc_size(ptr); if (size) memset(ptr, 0xCC, size); #endif malloc_zone_free((malloc_zone_t *)allocator, ptr); return NULL; } #endif deallocateFunc = __CFAllocatorGetDeallocateFunction(&allocator->_context); if (NULL != deallocateFunc) { 
INVOKE_CALLBACK2(deallocateFunc, ptr, allocator->_context.info); } return NULL; } if (NULL == ptr && 0 == newsize) return NULL; #if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI if (allocator->_base._cfisa != __CFISAForTypeID(__kCFAllocatorTypeID)) { // malloc_zone_t * return malloc_zone_realloc((malloc_zone_t *)allocator, ptr, newsize); } #endif reallocateFunc = __CFAllocatorGetReallocateFunction(&allocator->_context); if (NULL == reallocateFunc) return NULL; newptr = (void *)INVOKE_CALLBACK4(reallocateFunc, ptr, newsize, hint, allocator->_context.info); return newptr; }
// This function does no ObjC dispatch or argument checking;
// It should only be called from places where that dispatch and check has already been done, or NSCFArray
/* Core splice primitive: replaces `range` of the array with `newCount`
 * values from `newValues`. All CFArray insert/remove/replace operations
 * funnel through here.
 *
 * Phase 1: retain the incoming values up front (unless no retain callback or
 * the array is finalized), into a 256-slot stack buffer or a heap temp —
 * done BEFORE any removal so a failure mode never leaves half-retained state,
 * and so a value being both removed and re-inserted survives.
 * Phase 2: bump the mutation counter, release the old region-B values
 * (__CFArrayReleaseValues with releaseStorage=false — the region comment
 * block below describes the A/B/C layout).
 * Phase 3: ensure a deque store exists (allocating one sized by
 * __CFArrayDequeRoundUpCapacity, centered via _leftIdx, when the array is
 * empty) or reposition regions A/C when the replacement changes the length.
 * Phase 4: blast the new values into the gap with objc_memmove_collectable
 * (GC-safe bulk copy), publish the new count, and free the heap temp if one
 * was used. The odd `if (0) {} else if ...` chains are presumably residue
 * of a removed storage variant (CFStorage) — TODO confirm before tidying.
 */
void _CFArrayReplaceValues(CFMutableArrayRef array, CFRange range, const void **newValues, CFIndex newCount) { CHECK_FOR_MUTATION(array); BEGIN_MUTATION(array); const CFArrayCallBacks *cb; CFIndex idx, cnt, futureCnt; const void **newv, *buffer[256]; cnt = __CFArrayGetCount(array); futureCnt = cnt - range.length + newCount; CFAssert1(newCount <= futureCnt, __kCFLogAssertion, "%s(): internal error 1", __PRETTY_FUNCTION__); cb = __CFArrayGetCallBacks(array); CFAllocatorRef allocator = __CFGetAllocator(array); /* Retain new values if needed, possibly allocating a temporary buffer for them */ if (NULL != cb->retain && !hasBeenFinalized(array)) { newv = (newCount <= 256) ? (const void **)buffer : (const void **)CFAllocatorAllocate(kCFAllocatorSystemDefault, newCount * sizeof(void *), 0); // GC OK if (newv != buffer && __CFOASafe) __CFSetLastAllocationEventName(newv, "CFArray (temp)"); for (idx = 0; idx < newCount; idx++) { newv[idx] = (void *)INVOKE_CALLBACK2(cb->retain, allocator, (void *)newValues[idx]); } } else { newv = newValues; } array->_mutations++; /* Now, there are three regions of interest, each of which may be empty: * A: the region from index 0 to one less than the range.location * B: the region of the range * C: the region from range.location + range.length to the end * Note that index 0 is not necessarily at the lowest-address edge * of the available storage. The values in region B need to get * released, and the values in regions A and C (depending) need * to get shifted if the number of new values is different from * the length of the range being replaced. 
*/ if (0 < range.length) { __CFArrayReleaseValues(array, range, false); } // region B elements are now "dead" if (0) { } else if (NULL == array->_store) { if (0) { } else if (0 <= futureCnt) { struct __CFArrayDeque *deque; CFIndex capacity = __CFArrayDequeRoundUpCapacity(futureCnt); CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket); deque = (struct __CFArrayDeque *)CFAllocatorAllocate(_CFConvertAllocatorToGCRefZeroEquivalent(allocator), size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0); if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)"); deque->_leftIdx = (capacity - newCount) / 2; deque->_capacity = capacity; __CFAssignWithWriteBarrier((void **)&array->_store, (void *)deque); } } else { // Deque // reposition regions A and C for new region B elements in gap if (0) { } else if (range.length != newCount) { __CFArrayRepositionDequeRegions(array, range, newCount); } } // copy in new region B elements if (0 < newCount) { if (0) { } else { // Deque struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store; struct __CFArrayBucket *raw_buckets = (struct __CFArrayBucket *)((uint8_t *)deque + sizeof(struct __CFArrayDeque)); objc_memmove_collectable(raw_buckets + deque->_leftIdx + range.location, newv, newCount * sizeof(struct __CFArrayBucket)); } } __CFArraySetCount(array, futureCnt); if (newv != buffer && newv != newValues) CFAllocatorDeallocate(kCFAllocatorSystemDefault, newv); END_MUTATION(array); }