/* Return the allocator associated with any CF object.
 * A NULL object maps to the system default allocator; CFAllocator
 * instances are special-cased because they record their own allocator
 * inline rather than in the runtime base. */
CFAllocatorRef CFGetAllocator(CFTypeRef cf) {
    if (NULL == cf) {
        return kCFAllocatorSystemDefault;
    }
    if (__CFGenericTypeID_inline(cf) == __kCFAllocatorTypeID_CONST) {
        return __CFAllocatorGetAllocator(cf);
    }
    return __CFGetAllocator(cf);
}
/* Remove 'value' from a mutable set: locate its bucket, run the release
 * callback if one is installed, then mark the bucket with the deleted
 * sentinel (not the empty one) so probe chains through it stay intact.
 * No-op when the set is empty or the value is absent. */
void CFSetRemoveValue(CFMutableSetRef set, const void *value) {
    CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "removeObject:", value);
    __CFGenericValidateType(set, __kCFSetTypeID);
    switch (__CFSetGetType(set)) {
    case __kCFSetMutable:
    case __kCFSetFixedMutable:
        break;
    default:
        CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
        break;
    }
    if (0 == set->_count) return;           // nothing stored at all
    struct __CFSetBucket *bucket = NULL;
    __CFSetFindBuckets1(set, value, &bucket);
    if (NULL == bucket) return;             // value not present
    set->_count--;
    const CFSetCallBacks *callbacks = __CFSetGetCallBacks(set);
    if (callbacks->release) {
        INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))callbacks->release), __CFGetAllocator(set), bucket->_key, set->_context);
    }
    // Deleted sentinel keeps collision chains walkable for later lookups.
    bucket->_key = set->_deletedMarker;
    set->_bucketsUsed--;
}
/* Replace the value at 'idx' of a mutable array.  An index equal to the
 * current count is explicitly permitted and behaves as an append (routed
 * through _CFArrayReplaceValues); otherwise the slot is swapped in place.
 * The new value is retained before the old one is released, so replacing
 * a value with itself can never drop its retain count to zero. */
void CFArraySetValueAtIndex(CFMutableArrayRef array, CFIndex idx, const void *value) {
    // Toll-free bridging: forward to the ObjC implementation when 'array' is really an NSMutableArray.
    CF_OBJC_FUNCDISPATCHV(__kCFArrayTypeID, void, (NSMutableArray *)array, setObject:(id)value atIndex:(NSUInteger)idx);
    __CFGenericValidateType(array, __kCFArrayTypeID);
    CFAssert1(__CFArrayGetType(array) != __kCFArrayImmutable, __kCFLogAssertion, "%s(): array is immutable", __PRETTY_FUNCTION__);
    // Note '<=': idx == count is the append case handled below.
    CFAssert2(0 <= idx && idx <= __CFArrayGetCount(array), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CHECK_FOR_MUTATION(array);
    if (idx == __CFArrayGetCount(array)) {
        // Append: insert one value into an empty range at the end.
        _CFArrayReplaceValues(array, CFRangeMake(idx, 0), &value, 1);
    } else {
        BEGIN_MUTATION(array);
        const void *old_value;
        const CFArrayCallBacks *cb = __CFArrayGetCallBacks(array);
        CFAllocatorRef allocator = __CFGetAllocator(array);
        struct __CFArrayBucket *bucket = __CFArrayGetBucketAtIndex(array, idx);
        // Retain the incoming value first; skipped once finalized under GC.
        if (NULL != cb->retain && !hasBeenFinalized(array)) {
            value = (void *)INVOKE_CALLBACK2(cb->retain, allocator, value);
        }
        old_value = bucket->_item;
        __CFAssignWithWriteBarrier((void **)&bucket->_item, (void *)value); // GC: handles deque/CFStorage cases.
        // Release the displaced value only after the slot holds the new one.
        if (NULL != cb->release && !hasBeenFinalized(array)) {
            INVOKE_CALLBACK2(cb->release, allocator, old_value);
        }
        array->_mutations++; // bump mutation counter used by enumeration-safety checks
        END_MUTATION(array);
    }
}
/* Empty a mutable set: invoke the release callback on every occupied
 * bucket, reset each bucket to the empty marker, and zero both counters.
 * Already-empty sets return immediately. */
void CFSetRemoveAllValues(CFMutableSetRef set) {
    CF_OBJC_FUNCDISPATCH0(__kCFSetTypeID, void, set, "removeAllObjects");
    __CFGenericValidateType(set, __kCFSetTypeID);
    switch (__CFSetGetType(set)) {
    case __kCFSetMutable:
    case __kCFSetFixedMutable:
        break;
    default:
        CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
        break;
    }
    if (0 == set->_count) return;
    const CFSetCallBacks *callbacks = __CFSetGetCallBacks(set);
    CFAllocatorRef allocator = __CFGetAllocator(set);
    struct __CFSetBucket *slots = set->_buckets;
    CFIndex total = set->_bucketsNum;
    for (CFIndex i = 0; i < total; i++) {
        struct __CFSetBucket *slot = &slots[i];
        if (!__CFSetBucketIsOccupied(set, slot)) continue;
        if (callbacks->release) {
            INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))callbacks->release), allocator, slot->_key, set->_context);
        }
        slot->_key = set->_emptyMarker;
    }
    set->_bucketsUsed = 0;
    set->_count = 0;
}
/* Runtime copyDescription callback for CFArray: build a debug string of
 * the form "<CFArray ...>{type = ..., count = N, values = (...)}" using
 * each value's copyDescription callback when available, or its pointer
 * otherwise.  Returns a new mutable string owned by the caller. */
static CFStringRef __CFArrayCopyDescription(CFTypeRef cf) {
    CFArrayRef array = (CFArrayRef)cf;
    CFIndex count = __CFArrayGetCount(array);
    CFAllocatorRef allocator = __CFGetAllocator(array);
    CFMutableStringRef output = CFStringCreateMutable(allocator, 0);
    const char *lead = count ? "\n" : "";
    CFIndex kind = __CFArrayGetType(array);
    if (__kCFArrayImmutable == kind) {
        CFStringAppendFormat(output, NULL, CFSTR("<CFArray %p [%p]>{type = immutable, count = %lu, values = (%s"), cf, allocator, (unsigned long)count, lead);
    } else if (__kCFArrayDeque == kind) {
        CFStringAppendFormat(output, NULL, CFSTR("<CFArray %p [%p]>{type = mutable-small, count = %lu, values = (%s"), cf, allocator, (unsigned long)count, lead);
    }
    const CFArrayCallBacks *callbacks = __CFArrayGetCallBacks(array);
    for (CFIndex pos = 0; pos < count; pos++) {
        const void *item = __CFArrayGetBucketAtIndex(array, pos)->_item;
        CFStringRef itemDesc = NULL;
        if (NULL != callbacks->copyDescription) {
            itemDesc = (CFStringRef)INVOKE_CALLBACK1(callbacks->copyDescription, item);
        }
        if (NULL != itemDesc) {
            CFStringAppendFormat(output, NULL, CFSTR("\t%lu : %@\n"), (unsigned long)pos, itemDesc);
            CFRelease(itemDesc); // callback returned +1
        } else {
            CFStringAppendFormat(output, NULL, CFSTR("\t%lu : <%p>\n"), (unsigned long)pos, item);
        }
    }
    CFStringAppend(output, CFSTR(")}"));
    return output;
}
/* Runtime deallocation callback for CFArray.  Under a collectable (GC)
 * allocator the array is kept structurally intact during finalization:
 * with no ownership callbacks there is nothing to do, and with the
 * standard CFType callbacks each item is released directly while the
 * storage is left for the collector.  Otherwise values are released
 * normally and the backing store freed. */
static void __CFArrayDeallocate(CFTypeRef cf) {
    CFArrayRef array = (CFArrayRef)cf;
    BEGIN_MUTATION(array);
#if DEPLOYMENT_TARGET_MACOSX
    // Under GC, keep contents alive when we know we can, either standard callbacks or NULL
    // if (__CFBitfieldGetValue(cf->info, 5, 4)) return; // bits only ever set under GC
    CFAllocatorRef allocator = __CFGetAllocator(array);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
        // XXX_PCB keep array intact during finalization.
        const CFArrayCallBacks *cb = __CFArrayGetCallBacks(array);
        if (cb->retain == NULL && cb->release == NULL) {
            // No ownership callbacks: nothing to release; collector reclaims storage.
            END_MUTATION(array);
            return;
        }
        if (cb == &kCFTypeArrayCallBacks || cb->release == kCFTypeArrayCallBacks.release) {
            // Standard CFType callbacks: mark finalized, then release each item
            // in place without disturbing the array's structure.
            markFinalized(cf);
            for (CFIndex idx = 0; idx < __CFArrayGetCount(array); idx++) {
                const void *item = CFArrayGetValueAtIndex(array, 0 + idx);
                kCFTypeArrayCallBacks.release(kCFAllocatorSystemDefault, item);
            }
            END_MUTATION(array);
            return;
        }
    }
#endif
    // Non-GC (or custom callbacks): release all values and free the store.
    __CFArrayReleaseValues(array, CFRangeMake(0, __CFArrayGetCount(array)), true);
    END_MUTATION(array);
}
/* Release the values in 'range' (GC-aware variant): run the release
 * callback on each slot and NULL the slot to break the strong reference
 * for the collector.  When 'releaseStorageIfPossible' is set and the range
 * covers the whole array, the deque storage itself is dropped as well.
 * Finalized arrays skip the per-slot work entirely (see comment below). */
static void __CFArrayReleaseValues(CFArrayRef array, CFRange range, bool releaseStorageIfPossible) {
    const CFArrayCallBacks *cb = __CFArrayGetCallBacks(array);
    CFAllocatorRef allocator;
    CFIndex idx;
    switch (__CFArrayGetType(array)) {
    case __kCFArrayImmutable:
        if (NULL != cb->release && 0 < range.length && !hasBeenFinalized(array)) {
            // if we've been finalized then we know that
            //   1) we're using the standard callback on GC memory
            //   2) the slots don't' need to be zeroed
            struct __CFArrayBucket *buckets = __CFArrayGetBucketsPtr(array);
            allocator = __CFGetAllocator(array);
            for (idx = 0; idx < range.length; idx++) {
                INVOKE_CALLBACK2(cb->release, allocator, buckets[idx + range.location]._item);
                buckets[idx + range.location]._item = NULL; // GC: break strong reference.
            }
        }
        break;
    case __kCFArrayDeque: {
        struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
        if (0 < range.length && NULL != deque && !hasBeenFinalized(array)) {
            struct __CFArrayBucket *buckets = __CFArrayGetBucketsPtr(array);
            if (NULL != cb->release) {
                allocator = __CFGetAllocator(array);
                for (idx = 0; idx < range.length; idx++) {
                    INVOKE_CALLBACK2(cb->release, allocator, buckets[idx + range.location]._item);
                    buckets[idx + range.location]._item = NULL; // GC: break strong reference.
                }
            } else {
                // No release callback: still NULL the slots for the collector.
                for (idx = 0; idx < range.length; idx++) {
                    buckets[idx + range.location]._item = NULL; // GC: break strong reference.
                }
            }
        }
        if (releaseStorageIfPossible && 0 == range.location && __CFArrayGetCount(array) == range.length) {
            // Whole array emptied: free the deque (explicitly only when not
            // collectable; under GC the collector reclaims it).
            allocator = __CFGetAllocator(array);
            if (NULL != deque) if (!CF_IS_COLLECTABLE_ALLOCATOR(allocator)) CFAllocatorDeallocate(allocator, deque);
            __CFArraySetCount(array, 0); // GC: _count == 0 ==> _store == NULL.
            ((struct __CFArray *)array)->_store = NULL;
        }
        break;
    }
    }
}
/* Runtime deallocation callback for CFSet: release all stored values, then
 * free the separately allocated bucket array if this is the growable
 * (__kCFSetMutable) variant. */
static void __CFSetDeallocate(CFTypeRef cf) {
    CFMutableSetRef set = (CFMutableSetRef)cf;
    CFAllocatorRef allocator = __CFGetAllocator(set);
    // Relabel an immutable set as fixed-mutable so the removal below passes
    // the mutating-operation checks during teardown.
    if (__kCFSetImmutable == __CFSetGetType(set)) {
        __CFBitfieldSetValue(((CFRuntimeBase *)set)->_info, 1, 0, __kCFSetFixedMutable);
    }
    CFSetRemoveAllValues(set);
    // Only the growable variant owns an out-of-line bucket array.
    if (__kCFSetMutable == __CFSetGetType(set) && NULL != set->_buckets) {
        CFAllocatorDeallocate(allocator, set->_buckets);
    }
}
/* Store 'value' in the set with "set" semantics: if an equal value is
 * already present its key is replaced by the new one (old key released,
 * new one retained); otherwise the value is inserted.  The empty/deleted
 * sentinel pointers are re-chosen if the new value happens to collide
 * with one of them. */
void CFSetSetValue(CFMutableSetRef set, const void *value) {
    struct __CFSetBucket *match, *nomatch;
    const CFSetCallBacks *cb;
    const void *newValue;
    // Toll-free bridging: forward to the NSMutableSet primitive.
    CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "_setObject:", value);
    __CFGenericValidateType(set, __kCFSetTypeID);
    switch (__CFSetGetType(set)) {
    case __kCFSetMutable:
        // Grow first so the no-match insert below always finds a free bucket.
        if (set->_bucketsUsed == set->_capacity || NULL == set->_buckets) {
            __CFSetGrow(set, 1);
        }
        break;
    case __kCFSetFixedMutable:
        break;
    default:
        CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
        break;
    }
    // 'match' is the bucket holding an equal value (if any); 'nomatch' is the
    // bucket where a new insert would go.
    __CFSetFindBuckets2(set, value, &match, &nomatch);
    cb = __CFSetGetCallBacks(set);
    // Retain the incoming value before the table is modified.
    if (cb->retain) {
        newValue = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))cb->retain), __CFGetAllocator(set), value, set->_context);
    } else {
        newValue = value;
    }
    if (match) {
        // Equal value present: release the stored key, then overwrite it.
        if (cb->release) {
            INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))cb->release), __CFGetAllocator(set), match->_key, set->_context);
            // NOTE(review): the deleted-marker write happens only when a release
            // callback exists -- matches upstream, but looks asymmetric; confirm.
            match->_key = set->_deletedMarker;
        }
        // If the retained value collides with a sentinel, re-pick the sentinel
        // before storing, so the bucket is never misread as empty/deleted.
        if (set->_emptyMarker == newValue) {
            __CFSetFindNewEmptyMarker(set);
        }
        if (set->_deletedMarker == newValue) {
            __CFSetFindNewDeletedMarker(set);
        }
        match->_key = newValue;
    } else {
        CFAssert3(__kCFSetFixedMutable != __CFSetGetType(set) || set->_count < set->_capacity, __kCFLogAssertion, "%s(): capacity exceeded on fixed-capacity set %p (capacity = %d)", __PRETTY_FUNCTION__, set, set->_capacity);
        if (set->_emptyMarker == newValue) {
            __CFSetFindNewEmptyMarker(set);
        }
        if (set->_deletedMarker == newValue) {
            __CFSetFindNewDeletedMarker(set);
        }
        nomatch->_key = newValue;
        set->_bucketsUsed++;
        set->_count++;
    }
}
/* Release the values in 'range' (non-GC variant): run the release callback
 * on each slot, scrub the released slots with memset, and — when the whole
 * array was emptied and 'releaseStorageIfPossible' is set — free the deque
 * storage and reset the array to the empty state. */
static void __CFArrayReleaseValues(CFArrayRef array, CFRange range, bool releaseStorageIfPossible) {
    const CFArrayCallBacks *callbacks = __CFArrayGetCallBacks(array);
    switch (__CFArrayGetType(array)) {
    case __kCFArrayImmutable:
        if (0 < range.length && NULL != callbacks->release) {
            struct __CFArrayBucket *slots = __CFArrayGetBucketsPtr(array);
            CFAllocatorRef allocator = __CFGetAllocator(array);
            for (CFIndex i = 0; i < range.length; i++) {
                INVOKE_CALLBACK2(callbacks->release, allocator, slots[range.location + i]._item);
            }
            memset(slots + range.location, 0, sizeof(struct __CFArrayBucket) * range.length);
        }
        break;
    case __kCFArrayDeque: {
        struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
        if (NULL != deque && 0 < range.length) {
            struct __CFArrayBucket *slots = __CFArrayGetBucketsPtr(array);
            if (NULL != callbacks->release) {
                CFAllocatorRef allocator = __CFGetAllocator(array);
                for (CFIndex i = 0; i < range.length; i++) {
                    INVOKE_CALLBACK2(callbacks->release, allocator, slots[range.location + i]._item);
                }
            }
            // Scrub whether or not a release callback exists.
            memset(slots + range.location, 0, sizeof(struct __CFArrayBucket) * range.length);
        }
        if (releaseStorageIfPossible && 0 == range.location && range.length == __CFArrayGetCount(array)) {
            // Entire contents removed: drop the deque storage too.
            CFAllocatorRef allocator = __CFGetAllocator(array);
            if (NULL != deque) CFAllocatorDeallocate(allocator, deque);
            __CFArraySetCount(array, 0);
            ((struct __CFArray *)array)->_store = NULL;
        }
        break;
    }
    }
}
/* Grow a mutable set's bucket array so it can hold at least
 * (_count + numNewValues) values, then rehash every occupied bucket from
 * the old array into the new one.  Counts are unchanged; only the bucket
 * storage and capacity are. */
static void __CFSetGrow(CFMutableSetRef set, CFIndex numNewValues) {
    struct __CFSetBucket *oldbuckets = set->_buckets;
    CFIndex idx, oldnbuckets = set->_bucketsNum;
    CFIndex oldCount = set->_count;
    set->_capacity = __CFSetRoundUpCapacity(oldCount + numNewValues);
    set->_bucketsNum = __CFSetNumBucketsForCapacity(set->_capacity);
    set->_buckets = CFAllocatorAllocate(__CFGetAllocator(set), set->_bucketsNum * sizeof(struct __CFSetBucket), 0);
    if (NULL == set->_buckets) HALT; // allocation failure is fatal here
    if (__CFOASafe) __CFSetLastAllocationEventName(set->_buckets, "CFSet (store)");
    // Initialize every new bucket to the empty sentinel before rehashing.
    for (idx = set->_bucketsNum; idx--;) {
        set->_buckets[idx]._key = set->_emptyMarker;
    }
    if (NULL == oldbuckets) return; // first allocation: nothing to rehash
    // Reinsert each occupied old bucket; a 'match' here would mean a value's
    // hash changed while it was in the table (caller bug, asserted below).
    for (idx = 0; idx < oldnbuckets; idx++) {
        if (__CFSetBucketIsOccupied(set, &oldbuckets[idx])) {
            struct __CFSetBucket *match, *nomatch;
            __CFSetFindBuckets2(set, oldbuckets[idx]._key, &match, &nomatch);
            CFAssert3(!match, __kCFLogAssertion, "%s(): two values (%p, %p) now hash to the same slot; mutable value changed while in table or hash value is not immutable", __PRETTY_FUNCTION__, oldbuckets[idx]._key, match->_key);
            nomatch->_key = oldbuckets[idx]._key;
        }
    }
    CFAssert1(set->_count == oldCount, __kCFLogAssertion, "%s(): set count differs after rehashing; error", __PRETTY_FUNCTION__);
    CFAllocatorDeallocate(__CFGetAllocator(set), oldbuckets);
}
/* Runtime deallocation callback for CFTree: detach children (non-GC only,
 * so the tree stays intact during GC finalization), release the info
 * pointer via the callback, and free custom callback storage if present. */
static void __CFTreeDeallocate(CFTypeRef cf) {
    CFTreeRef tree = (CFTreeRef)cf;
    CFAllocatorRef allocator = __CFGetAllocator(tree);
    if (!CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
        // GC: keep the tree intact during finalization.
        CFTreeRemoveAllChildren(tree);
    }
    const struct __CFTreeCallBacks *callbacks = __CFTreeGetCallBacks(tree);
    if (NULL != callbacks->release) {
        INVOKE_CALLBACK1(callbacks->release, tree->_info);
    }
    // Custom callback structs are heap-allocated alongside the tree.
    if (__CFTreeGetCallBacksType(tree) == __kCFTreeHasCustomCallBacks) {
        _CFAllocatorDeallocateGC(CFGetAllocator(tree), tree->_callbacks);
    }
}
/* Runtime deallocation callback for CFData: free the byte buffer unless it
 * is stored inline inside the object.  An explicit bytes-deallocator takes
 * precedence; otherwise the data's own allocator or plain free() is used
 * (collectable buffers are left to the collector). */
static void __CFDataDeallocate(CFTypeRef cf) {
    CFMutableDataRef data = (CFMutableDataRef)cf;
    if (__CFDataBytesInline(data)) return; // inline bytes die with the object
    CFAllocatorRef deallocator = data->_bytesDeallocator;
    if (NULL != deallocator) {
        // Caller supplied a dedicated deallocator for the byte buffer.
        _CFAllocatorDeallocateGC(deallocator, data->_bytes);
        if (!_CFAllocatorIsGCRefZero(deallocator)) CFRelease(deallocator);
    } else if (__CFDataUseAllocator(data)) {
        _CFAllocatorDeallocateGC(__CFGetAllocator(data), data->_bytes);
    } else if (!__CFDataAllocatesCollectable(data) && data->_bytes) {
        free(data->_bytes);
    }
    data->_bytes = NULL;
}
// Check __CFDataShouldAllocateCleared before passing true. static void *__CFDataAllocate(CFDataRef data, CFIndex size, Boolean clear) { void *bytes = NULL; if (__CFDataUseAllocator(data)) { CFAllocatorRef allocator = __CFGetAllocator(data); bytes = CFAllocatorAllocate(allocator, size, 0); if (clear) memset((uint8_t *)bytes, 0, size); } else { if (__CFDataAllocatesCollectable(data)) { bytes = auto_zone_allocate_object(objc_collectableZone(), size, AUTO_MEMORY_UNSCANNED, 0, clear); } else { if (clear) { bytes = calloc(1, size); } else { bytes = malloc(size); } } } return bytes; }
// This function is for Foundation's benefit; no one else should use it.
/* Pre-size a mutable array's deque storage to hold at least 'cap' values,
 * avoiding the incremental reallocations at small capacities.  The array's
 * count and contents are unchanged; only the backing deque grows. */
void _CFArraySetCapacity(CFMutableArrayRef array, CFIndex cap) {
    if (CF_IS_OBJC(__kCFArrayTypeID, array)) return; // ObjC arrays manage their own storage
    __CFGenericValidateType(array, __kCFArrayTypeID);
    CFAssert1(__CFArrayGetType(array) != __kCFArrayImmutable, __kCFLogAssertion, "%s(): array is immutable", __PRETTY_FUNCTION__);
    CFAssert3(__CFArrayGetCount(array) <= cap, __kCFLogAssertion, "%s(): desired capacity (%d) is less than count (%d)", __PRETTY_FUNCTION__, cap, __CFArrayGetCount(array));
    CHECK_FOR_MUTATION(array);
    BEGIN_MUTATION(array);
    // Currently, attempting to set the capacity of an array which is the CFStorage
    // variant, or set the capacity larger than __CF_MAX_BUCKETS_PER_DEQUE, has no
    // effect. The primary purpose of this API is to help avoid a bunch of the
    // resizes at the small capacities 4, 8, 16, etc.
    if (__CFArrayGetType(array) == __kCFArrayDeque) {
        struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
        CFIndex capacity = __CFArrayDequeRoundUpCapacity(cap);
        CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket);
        CFAllocatorRef allocator = __CFGetAllocator(array);
        allocator = _CFConvertAllocatorToGCRefZeroEquivalent(allocator);
        Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
        if (NULL == deque) {
            // No storage yet: allocate a fresh deque and center the (empty) contents.
            deque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
            if (NULL == deque) __CFArrayHandleOutOfMemory(array, size);
            if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
            deque->_leftIdx = capacity / 2;
        } else {
            // Existing storage: allocate larger, copy old header+contents,
            // then free the old block (the collector handles GC memory).
            struct __CFArrayDeque *olddeque = deque;
            CFIndex oldcap = deque->_capacity;
            deque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
            if (NULL == deque) __CFArrayHandleOutOfMemory(array, size);
            objc_memmove_collectable(deque, olddeque, sizeof(struct __CFArrayDeque) + oldcap * sizeof(struct __CFArrayBucket));
            if (!collectableMemory) CFAllocatorDeallocate(allocator, olddeque);
            if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
        }
        deque->_capacity = capacity;
        __CFAssignWithWriteBarrier((void **)&array->_store, (void *)deque); // GC-safe store assignment
    }
    END_MUTATION(array);
}
// may move deque storage, as it may need to grow deque static void __CFArrayRepositionDequeRegions(CFMutableArrayRef array, CFRange range, CFIndex newCount) { // newCount elements are going to replace the range, and the result will fit in the deque struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store; struct __CFArrayBucket *buckets; CFIndex cnt, futureCnt, numNewElems; CFIndex L, A, B, C, R; buckets = (struct __CFArrayBucket *)((uint8_t *)deque + sizeof(struct __CFArrayDeque)); cnt = __CFArrayGetCount(array); futureCnt = cnt - range.length + newCount; L = deque->_leftIdx; // length of region to left of deque A = range.location; // length of region in deque to left of replaced range B = range.length; // length of replaced range C = cnt - B - A; // length of region in deque to right of replaced range R = deque->_capacity - cnt - L; // length of region to right of deque numNewElems = newCount - B; CFIndex wiggle = deque->_capacity >> 17; if (wiggle < 4) wiggle = 4; if (deque->_capacity < (uint32_t)futureCnt || (cnt < futureCnt && L + R < wiggle)) { // must be inserting or space is tight, reallocate and re-center everything CFIndex capacity = __CFArrayDequeRoundUpCapacity(futureCnt + wiggle); CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket); CFAllocatorRef allocator = __CFGetAllocator(array); allocator = _CFConvertAllocatorToGCRefZeroEquivalent(allocator); Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator); struct __CFArrayDeque *newDeque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? 
__kCFAllocatorGCScannedMemory : 0); if (__CFOASafe) __CFSetLastAllocationEventName(newDeque, "CFArray (store-deque)"); struct __CFArrayBucket *newBuckets = (struct __CFArrayBucket *)((uint8_t *)newDeque + sizeof(struct __CFArrayDeque)); CFIndex oldL = L; CFIndex newL = (capacity - futureCnt) / 2; CFIndex oldC0 = oldL + A + B; CFIndex newC0 = newL + A + newCount; newDeque->_leftIdx = newL; newDeque->_capacity = capacity; if (0 < A) objc_memmove_collectable(newBuckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket)); if (0 < C) objc_memmove_collectable(newBuckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket)); __CFAssignWithWriteBarrier((void **)&array->_store, (void *)newDeque); if (!collectableMemory && deque) CFAllocatorDeallocate(allocator, deque); //printf("3: array %p store is now %p (%lx)\n", array, array->_store, *(unsigned long *)(array->_store)); return; } if ((numNewElems < 0 && C < A) || (numNewElems <= R && C < A)) { // move C // deleting: C is smaller // inserting: C is smaller and R has room CFIndex oldC0 = L + A + B; CFIndex newC0 = L + A + newCount; if (0 < C) objc_memmove_collectable(buckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket)); // GrP GC: zero-out newly exposed space on the right, if any if (oldC0 > newC0) memset(buckets + newC0 + C, 0, (oldC0 - newC0) * sizeof(struct __CFArrayBucket)); } else if ((numNewElems < 0) || (numNewElems <= L && A <= C)) { // move A // deleting: A is smaller or equal (covers remaining delete cases) // inserting: A is smaller and L has room CFIndex oldL = L; CFIndex newL = L - numNewElems; deque->_leftIdx = newL; if (0 < A) objc_memmove_collectable(buckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket)); // GrP GC: zero-out newly exposed space on the left, if any if (newL > oldL) memset(buckets + oldL, 0, (newL - oldL) * sizeof(struct __CFArrayBucket)); } else { // now, must be inserting, and either: // A<=C, but L doesn't have room (R might have, but don't 
care) // C<A, but R doesn't have room (L might have, but don't care) // re-center everything CFIndex oldL = L; CFIndex newL = (L + R - numNewElems) / 2; newL = newL - newL / 2; CFIndex oldC0 = oldL + A + B; CFIndex newC0 = newL + A + newCount; deque->_leftIdx = newL; if (newL < oldL) { if (0 < A) objc_memmove_collectable(buckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket)); if (0 < C) objc_memmove_collectable(buckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket)); // GrP GC: zero-out newly exposed space on the right, if any if (oldC0 > newC0) memset(buckets + newC0 + C, 0, (oldC0 - newC0) * sizeof(struct __CFArrayBucket)); } else { if (0 < C) objc_memmove_collectable(buckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket)); if (0 < A) objc_memmove_collectable(buckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket)); // GrP GC: zero-out newly exposed space on the left, if any if (newL > oldL) memset(buckets + oldL, 0, (newL - oldL) * sizeof(struct __CFArrayBucket)); } } }
/* Add 'value' to the set unless an equal value is already present.
 * Returns what the set ends up holding: the pre-existing key on a hit, or
 * the (possibly retained) new value on a miss.  Internal entry point: no
 * toll-free bridging and no argument checking beyond type validation. */
__private_extern__ const void *__CFSetAddValueAndReturn(CFMutableSetRef set, const void *value) {
    // not toll-free bridged, but internal
    __CFGenericValidateType(set, __kCFSetTypeID);
    switch (__CFSetGetType(set)) {
    case __kCFSetMutable:
        // Grow up front so a miss below is guaranteed a free bucket.
        if (set->_bucketsUsed == set->_capacity || NULL == set->_buckets) {
            __CFSetGrow(set, 1);
        }
        break;
    case __kCFSetFixedMutable:
        CFAssert3(set->_count < set->_capacity, __kCFLogAssertion, "%s(): capacity exceeded on fixed-capacity set %p (capacity = %d)", __PRETTY_FUNCTION__, set, set->_capacity);
        break;
    default:
        CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
        break;
    }
    struct __CFSetBucket *hit, *slot;
    __CFSetFindBuckets2(set, value, &hit, &slot);
    if (hit) {
        return hit->_key; // already present: hand back the stored key
    }
    const CFSetCallBacks *callbacks = __CFSetGetCallBacks(set);
    const void *stored;
    if (callbacks->retain) {
        stored = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))callbacks->retain), __CFGetAllocator(set), value, set->_context);
    } else {
        stored = value;
    }
    // If the retained value collides with a sentinel, re-pick the sentinel
    // before storing so the bucket is never misread as empty/deleted.
    if (set->_emptyMarker == stored) {
        __CFSetFindNewEmptyMarker(set);
    }
    if (set->_deletedMarker == stored) {
        __CFSetFindNewDeletedMarker(set);
    }
    slot->_key = stored;
    set->_bucketsUsed++;
    set->_count++;
    return stored;
}
// This function does no ObjC dispatch or argument checking;
// It should only be called from places where that dispatch and check has already been done, or NSCFArray
/* Core replace primitive: swap the values in 'range' for the 'newCount'
 * values at 'newValues'.  Retains incoming values (via a stack buffer or a
 * temporary allocation), releases the displaced ones, repositions the
 * deque's regions to open/close the gap, and finally copies the new values
 * into place. */
void _CFArrayReplaceValues(CFMutableArrayRef array, CFRange range, const void **newValues, CFIndex newCount) {
    CHECK_FOR_MUTATION(array);
    BEGIN_MUTATION(array);
    const CFArrayCallBacks *cb;
    CFIndex idx, cnt, futureCnt;
    const void **newv, *buffer[256];
    cnt = __CFArrayGetCount(array);
    futureCnt = cnt - range.length + newCount;
    CFAssert1(newCount <= futureCnt, __kCFLogAssertion, "%s(): internal error 1", __PRETTY_FUNCTION__);
    cb = __CFArrayGetCallBacks(array);
    CFAllocatorRef allocator = __CFGetAllocator(array);
    /* Retain new values if needed, possibly allocating a temporary buffer for them */
    if (NULL != cb->retain && !hasBeenFinalized(array)) {
        newv = (newCount <= 256) ? (const void **)buffer : (const void **)CFAllocatorAllocate(kCFAllocatorSystemDefault, newCount * sizeof(void *), 0); // GC OK
        if (newv != buffer && __CFOASafe) __CFSetLastAllocationEventName(newv, "CFArray (temp)");
        for (idx = 0; idx < newCount; idx++) {
            newv[idx] = (void *)INVOKE_CALLBACK2(cb->retain, allocator, (void *)newValues[idx]);
        }
    } else {
        newv = newValues;
    }
    array->_mutations++; // bump mutation counter used by enumeration-safety checks
    /* Now, there are three regions of interest, each of which may be empty:
     *   A: the region from index 0 to one less than the range.location
     *   B: the region of the range
     *   C: the region from range.location + range.length to the end
     * Note that index 0 is not necessarily at the lowest-address edge
     * of the available storage. The values in region B need to get
     * released, and the values in regions A and C (depending) need
     * to get shifted if the number of new values is different from
     * the length of the range being replaced.
     */
    if (0 < range.length) {
        __CFArrayReleaseValues(array, range, false);
    }
    // region B elements are now "dead"
    // NOTE(review): the 'if (0)' arms below appear to be scaffolding from a
    // build that specialized on additional storage variants; only the deque
    // paths remain live here.
    if (0) {
    } else if (NULL == array->_store) {
        if (0) {
        } else if (0 <= futureCnt) {
            // No storage yet: allocate a deque sized for the new contents.
            struct __CFArrayDeque *deque;
            CFIndex capacity = __CFArrayDequeRoundUpCapacity(futureCnt);
            CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket);
            deque = (struct __CFArrayDeque *)CFAllocatorAllocate(_CFConvertAllocatorToGCRefZeroEquivalent(allocator), size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
            // NOTE(review): allocation result is used unchecked here, unlike
            // _CFArraySetCapacity which calls __CFArrayHandleOutOfMemory.
            if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
            deque->_leftIdx = (capacity - newCount) / 2; // center the new contents
            deque->_capacity = capacity;
            __CFAssignWithWriteBarrier((void **)&array->_store, (void *)deque);
        }
    } else { // Deque
        // reposition regions A and C for new region B elements in gap
        if (0) {
        } else if (range.length != newCount) {
            __CFArrayRepositionDequeRegions(array, range, newCount);
        }
    }
    // copy in new region B elements
    if (0 < newCount) {
        if (0) {
        } else { // Deque
            struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
            struct __CFArrayBucket *raw_buckets = (struct __CFArrayBucket *)((uint8_t *)deque + sizeof(struct __CFArrayDeque));
            objc_memmove_collectable(raw_buckets + deque->_leftIdx + range.location, newv, newCount * sizeof(struct __CFArrayBucket));
        }
    }
    __CFArraySetCount(array, futureCnt);
    // Free the temporary retain buffer if one was allocated above.
    if (newv != buffer && newv != newValues) CFAllocatorDeallocate(kCFAllocatorSystemDefault, newv);
    END_MUTATION(array);
}