__private_extern__ void __CFGenericValidateType_(CFTypeRef cf, CFTypeID type, const char *func) {
    if (cf && CF_IS_OBJC(type, cf)) return;
    CFAssert2((cf != NULL) && (NULL != __CFRuntimeClassTable[__CFGenericTypeID_inline(cf)]) && (__kCFNotATypeTypeID != __CFGenericTypeID_inline(cf)) && (__kCFTypeTypeID != __CFGenericTypeID_inline(cf)), __kCFLogAssertion, "%s(): pointer %p is not a CF object", func, cf);
    CFAssert3(__CFGenericTypeID_inline(cf) == type, __kCFLogAssertion, "%s(): pointer %p is not a %s", func, cf, __CFRuntimeClassTable[type]->className);
}
// This function is for Foundation's benefit; no one else should use it.
void _CFSetSetCapacity(CFMutableSetRef set, CFIndex cap) {
    if (CF_IS_OBJC(__kCFSetTypeID, set)) return;
#if defined(DEBUG)
    __CFGenericValidateType(set, __kCFSetTypeID);
    CFAssert1(__CFSetGetType(set) != __kCFSetImmutable && __CFSetGetType(set) != __kCFSetFixedMutable, __kCFLogAssertion, "%s(): set is immutable or fixed-mutable", __PRETTY_FUNCTION__);
    CFAssert3(set->_count <= cap, __kCFLogAssertion, "%s(): desired capacity (%d) is less than count (%d)", __PRETTY_FUNCTION__, cap, set->_count);
#endif
    __CFSetGrow(set, cap - set->_count);
}
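/* A minimal caller sketch (illustrative only; the helper below is hypothetical, and
 * _CFSetSetCapacity is private to CF/Foundation): pre-sizing before a bulk insert
 * performs one __CFSetGrow up front instead of a rehash at each capacity step. */
static void __CFAddManyValues(CFMutableSetRef set, const void **values, CFIndex count) {
    _CFSetSetCapacity(set, CFSetGetCount(set) + count);    // one grow, no intermediate rehashes
    for (CFIndex idx = 0; idx < count; idx++) {
        CFSetAddValue(set, values[idx]);
    }
}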
void CFSetSetValue(CFMutableSetRef set, const void *value) {
    struct __CFSetBucket *match, *nomatch;
    const CFSetCallBacks *cb;
    const void *newValue;
    CF_OBJC_FUNCDISPATCH1(__kCFSetTypeID, void, set, "_setObject:", value);
    __CFGenericValidateType(set, __kCFSetTypeID);
    switch (__CFSetGetType(set)) {
    case __kCFSetMutable:
        if (set->_bucketsUsed == set->_capacity || NULL == set->_buckets) {
            __CFSetGrow(set, 1);
        }
        break;
    case __kCFSetFixedMutable:
        break;
    default:
        CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
        break;
    }
    __CFSetFindBuckets2(set, value, &match, &nomatch);
    cb = __CFSetGetCallBacks(set);
    if (cb->retain) {
        newValue = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))cb->retain), __CFGetAllocator(set), value, set->_context);
    } else {
        newValue = value;
    }
    if (match) {
        if (cb->release) {
            INVOKE_CALLBACK3(((void (*)(CFAllocatorRef, const void *, void *))cb->release), __CFGetAllocator(set), match->_key, set->_context);
            match->_key = set->_deletedMarker;
        }
        if (set->_emptyMarker == newValue) {
            __CFSetFindNewEmptyMarker(set);
        }
        if (set->_deletedMarker == newValue) {
            __CFSetFindNewDeletedMarker(set);
        }
        match->_key = newValue;
    } else {
        CFAssert3(__kCFSetFixedMutable != __CFSetGetType(set) || set->_count < set->_capacity, __kCFLogAssertion, "%s(): capacity exceeded on fixed-capacity set %p (capacity = %d)", __PRETTY_FUNCTION__, set, set->_capacity);
        if (set->_emptyMarker == newValue) {
            __CFSetFindNewEmptyMarker(set);
        }
        if (set->_deletedMarker == newValue) {
            __CFSetFindNewDeletedMarker(set);
        }
        nomatch->_key = newValue;
        set->_bucketsUsed++;
        set->_count++;
    }
}
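/* Illustrative contrast (a sketch; this caller is not CF source): unlike CFSetAddValue,
 * which keeps the existing member when an equal value is already present, CFSetSetValue
 * retains the new value and swaps it in, releasing the old member. */
static void __CFRefreshMember(CFMutableSetRef set, const void *value) {
    CFSetSetValue(set, value);    // value replaces any equal member already in the set
}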
CFMutableSetRef CFSetCreateMutableCopy(CFAllocatorRef allocator, CFIndex capacity, CFSetRef set) {
    CFMutableSetRef result;
    const CFSetCallBacks *cb;
    CFIndex idx, numValues = CFSetGetCount(set);
    const void **list, *buffer[256];
    CFAssert3(0 == capacity || numValues <= capacity, __kCFLogAssertion, "%s(): for fixed-mutable sets, capacity (%d) must be greater than or equal to initial number of values (%d)", __PRETTY_FUNCTION__, capacity, numValues);
    list = (numValues <= 256) ? buffer : (const void **)CFAllocatorAllocate(allocator, numValues * sizeof(void *), 0);
    if (list != buffer && __CFOASafe) __CFSetLastAllocationEventName(list, "CFSet (temp)");
    CFSetGetValues(set, list);
    cb = CF_IS_OBJC(__kCFSetTypeID, set) ? &kCFTypeSetCallBacks : __CFSetGetCallBacks(set);
    result = CFSetCreateMutable(allocator, capacity, cb);
    if (0 == capacity) _CFSetSetCapacity(result, numValues);
    for (idx = 0; idx < numValues; idx++) {
        CFSetAddValue(result, list[idx]);
    }
    if (list != buffer) CFAllocatorDeallocate(allocator, list);
    return result;
}
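/* Usage sketch (assumed caller code, not CF source): passing 0 for capacity yields an
 * ordinary growable copy; the 0 == capacity branch above then pre-sizes it to the source
 * count so the CFSetAddValue loop does not trigger intermediate rehashes. */
static CFMutableSetRef __CFCopySetForEditing(CFAllocatorRef alloc, CFSetRef source) {
    return CFSetCreateMutableCopy(alloc, 0, source);    // 0 == no capacity limit
}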
__private_extern__ const void *__CFSetAddValueAndReturn(CFMutableSetRef set, const void *value) {
    struct __CFSetBucket *match, *nomatch;
    const CFSetCallBacks *cb;
    const void *newValue;
    // #warning not toll-free bridged, but internal
    __CFGenericValidateType(set, __kCFSetTypeID);
    switch (__CFSetGetType(set)) {
    case __kCFSetMutable:
        if (set->_bucketsUsed == set->_capacity || NULL == set->_buckets) {
            __CFSetGrow(set, 1);
        }
        break;
    case __kCFSetFixedMutable:
        CFAssert3(set->_count < set->_capacity, __kCFLogAssertion, "%s(): capacity exceeded on fixed-capacity set %p (capacity = %d)", __PRETTY_FUNCTION__, set, set->_capacity);
        break;
    default:
        CFAssert2(__CFSetGetType(set) != __kCFSetImmutable, __kCFLogAssertion, "%s(): immutable set %p passed to mutating operation", __PRETTY_FUNCTION__, set);
        break;
    }
    __CFSetFindBuckets2(set, value, &match, &nomatch);
    if (match) {
        return match->_key;
    }
    cb = __CFSetGetCallBacks(set);
    if (cb->retain) {
        newValue = (void *)INVOKE_CALLBACK3(((const void *(*)(CFAllocatorRef, const void *, void *))cb->retain), __CFGetAllocator(set), value, set->_context);
    } else {
        newValue = value;
    }
    if (set->_emptyMarker == newValue) {
        __CFSetFindNewEmptyMarker(set);
    }
    if (set->_deletedMarker == newValue) {
        __CFSetFindNewDeletedMarker(set);
    }
    nomatch->_key = newValue;
    set->_bucketsUsed++;
    set->_count++;
    return newValue;
}
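/* Hypothetical internal caller (a sketch; the pool and helper here are not CF source):
 * the AndReturn variant supports uniquing tables, handing back the already-interned
 * member instead of the caller's duplicate. */
static CFStringRef __CFUniquedString(CFMutableSetRef pool, CFStringRef candidate) {
    // Returns the existing equal member if present; otherwise retains and inserts candidate.
    return (CFStringRef)__CFSetAddValueAndReturn(pool, candidate);
}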
// This function is for Foundation's benefit; no one else should use it.
void _CFArraySetCapacity(CFMutableArrayRef array, CFIndex cap) {
    if (CF_IS_OBJC(__kCFArrayTypeID, array)) return;
    __CFGenericValidateType(array, __kCFArrayTypeID);
    CFAssert1(__CFArrayGetType(array) != __kCFArrayImmutable, __kCFLogAssertion, "%s(): array is immutable", __PRETTY_FUNCTION__);
    CFAssert3(__CFArrayGetCount(array) <= cap, __kCFLogAssertion, "%s(): desired capacity (%d) is less than count (%d)", __PRETTY_FUNCTION__, cap, __CFArrayGetCount(array));
    CHECK_FOR_MUTATION(array);
    BEGIN_MUTATION(array);
    // Currently, attempting to set the capacity of an array which is the CFStorage
    // variant, or set the capacity larger than __CF_MAX_BUCKETS_PER_DEQUE, has no
    // effect. The primary purpose of this API is to help avoid a bunch of the
    // resizes at the small capacities 4, 8, 16, etc.
    if (__CFArrayGetType(array) == __kCFArrayDeque) {
        struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
        CFIndex capacity = __CFArrayDequeRoundUpCapacity(cap);
        CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket);
        CFAllocatorRef allocator = __CFGetAllocator(array);
        allocator = _CFConvertAllocatorToGCRefZeroEquivalent(allocator);
        Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
        if (NULL == deque) {
            deque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
            if (NULL == deque) __CFArrayHandleOutOfMemory(array, size);
            if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
            deque->_leftIdx = capacity / 2;
        } else {
            struct __CFArrayDeque *olddeque = deque;
            CFIndex oldcap = deque->_capacity;
            deque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
            if (NULL == deque) __CFArrayHandleOutOfMemory(array, size);
            objc_memmove_collectable(deque, olddeque, sizeof(struct __CFArrayDeque) + oldcap * sizeof(struct __CFArrayBucket));
            if (!collectableMemory) CFAllocatorDeallocate(allocator, olddeque);
            if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
        }
        deque->_capacity = capacity;
        __CFAssignWithWriteBarrier((void **)&array->_store, (void *)deque);
    }
    END_MUTATION(array);
}
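/* Caller sketch (illustrative; _CFArraySetCapacity is private to CF/Foundation, and the
 * expected count is an assumption of the example): reserving the deque up front skips
 * the reallocations that would otherwise occur at capacities 4, 8, 16, ... */
static CFMutableArrayRef __CFCreatePresizedArray(CFAllocatorRef alloc, CFIndex expectedCount) {
    CFMutableArrayRef array = CFArrayCreateMutable(alloc, 0, &kCFTypeArrayCallBacks);
    if (array) _CFArraySetCapacity(array, expectedCount);    // no effect on CFStorage-backed arrays
    return array;
}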
static void __CFSetGrow(CFMutableSetRef set, CFIndex numNewValues) {
    struct __CFSetBucket *oldbuckets = set->_buckets;
    CFIndex idx, oldnbuckets = set->_bucketsNum;
    CFIndex oldCount = set->_count;
    set->_capacity = __CFSetRoundUpCapacity(oldCount + numNewValues);
    set->_bucketsNum = __CFSetNumBucketsForCapacity(set->_capacity);
    set->_buckets = CFAllocatorAllocate(__CFGetAllocator(set), set->_bucketsNum * sizeof(struct __CFSetBucket), 0);
    if (NULL == set->_buckets) HALT;
    if (__CFOASafe) __CFSetLastAllocationEventName(set->_buckets, "CFSet (store)");
    for (idx = set->_bucketsNum; idx--;) {
        set->_buckets[idx]._key = set->_emptyMarker;
    }
    if (NULL == oldbuckets) return;
    for (idx = 0; idx < oldnbuckets; idx++) {
        if (__CFSetBucketIsOccupied(set, &oldbuckets[idx])) {
            struct __CFSetBucket *match, *nomatch;
            __CFSetFindBuckets2(set, oldbuckets[idx]._key, &match, &nomatch);
            CFAssert3(!match, __kCFLogAssertion, "%s(): two values (%p, %p) now hash to the same slot; mutable value changed while in table or hash value is not immutable", __PRETTY_FUNCTION__, oldbuckets[idx]._key, match->_key);
            nomatch->_key = oldbuckets[idx]._key;
        }
    }
    CFAssert1(set->_count == oldCount, __kCFLogAssertion, "%s(): set count differs after rehashing; error", __PRETTY_FUNCTION__);
    CFAllocatorDeallocate(__CFGetAllocator(set), oldbuckets);
}
// NULL bytesDeallocator to this function does not mean the default allocator, it means
// that there should be no deallocator, and the bytes should be copied.
static CFMutableDataRef __CFDataInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex length, CFAllocatorRef bytesDeallocator) {
    CFMutableDataRef memory;
    __CFGenericValidateMutabilityFlags(flags);
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert3(kCFFixedMutable != __CFMutableVarietyFromFlags(flags) || length <= capacity, __kCFLogAssertion, "%s(): for kCFFixedMutable type, capacity (%d) must be greater than or equal to number of initial elements (%d)", __PRETTY_FUNCTION__, capacity, length);
    CFAssert2(0 <= length, __kCFLogAssertion, "%s(): length (%d) cannot be less than zero", __PRETTY_FUNCTION__, length);

    Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
    Boolean noCopy = bytesDeallocator != NULL;
    Boolean isMutable = ((flags & __kCFMutable) != 0);
    Boolean isGrowable = ((flags & __kCFGrowable) != 0);
    Boolean allocateInline = !isGrowable && !noCopy && capacity < INLINE_BYTES_THRESHOLD;
    allocator = (allocator == NULL) ? __CFGetDefaultAllocator() : allocator;
    Boolean useAllocator = (allocator != kCFAllocatorSystemDefault && allocator != kCFAllocatorMalloc && allocator != kCFAllocatorMallocZone);

    CFIndex size = sizeof(struct __CFData) - sizeof(CFRuntimeBase);
    if (allocateInline) {
        size += sizeof(uint8_t) * __CFDataNumBytesForCapacity(capacity) + sizeof(uint8_t) * 15;    // for 16-byte alignment fixup
    }
    memory = (CFMutableDataRef)_CFRuntimeCreateInstance(allocator, __kCFDataTypeID, size, NULL);
    if (NULL == memory) {
        return NULL;
    }
    __CFDataSetNumBytesUsed(memory, 0);
    __CFDataSetLength(memory, 0);
    __CFDataSetInfoBits(memory,
                        (allocateInline ? __kCFBytesInline : 0) |
                        (useAllocator ? __kCFUseAllocator : 0) |
                        (collectableMemory ? __kCFAllocatesCollectable : 0));

    BOOL finalize = YES;
    BOOL scan = YES;
    if (collectableMemory) {
        if (allocateInline) {
            // We have no pointer to anything that needs to be reclaimed, so don't scan or finalize.
            scan = NO;
            finalize = NO;
        } else if (noCopy) {
            if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator)) {
                // We're taking responsibility for externally GC-allocated memory, so scan us, but we don't need to finalize.
                finalize = NO;
            } else if (bytesDeallocator == kCFAllocatorNull) {
                // We don't have responsibility for these bytes, so there's no need to be scanned and we don't need to finalize.
                scan = NO;
                finalize = NO;
            } else {
                // We have a pointer to non-GC-allocated memory, so don't scan, but do finalize.
                scan = NO;
            }
        }
        if (!scan) auto_zone_set_unscanned(objc_collectableZone(), memory);
        if (!finalize) auto_zone_set_nofinalize(objc_collectableZone(), memory);
    }
    if (isMutable && isGrowable) {
        __CFDataSetCapacity(memory, __CFDataRoundUpCapacity(1));
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(__CFDataRoundUpCapacity(1)));
        __CFSetMutableVariety(memory, kCFMutable);
    } else {
        /* Don't round up capacity */
        __CFDataSetCapacity(memory, capacity);
        __CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(capacity));
        __CFSetMutableVariety(memory, kCFFixedMutable);
    }
    if (noCopy) {
        __CFAssignWithWriteBarrier((void **)&memory->_bytes, (uint8_t *)bytes);
        if (finalize) {
            if (_CFAllocatorIsGCRefZero(bytesDeallocator)) {
                memory->_bytesDeallocator = bytesDeallocator;
            } else {
                memory->_bytesDeallocator = (CFAllocatorRef)CFRetain(_CFConvertAllocatorToNonGCRefZeroEquivalent(bytesDeallocator));
            }
        }
        if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator) && !_CFAllocatorIsGCRefZero(bytesDeallocator)) {
            // When given a GC allocator which is not one of the GCRefZero ones as the deallocator, we assume that
            // the no-copy memory is GC-allocated with a retain count of (at least) 1 and we should release it now
            // instead of waiting until __CFDataDeallocate.
            auto_zone_release(objc_collectableZone(), memory->_bytes);
        }
        __CFDataSetNumBytesUsed(memory, length);
        __CFDataSetLength(memory, length);
        // Mutable no-copy datas are not allowed, so don't bother setting needsToZero flag.
    } else {
        Boolean cleared = (isMutable && !isGrowable && !_CFExecutableLinkedOnOrAfter(CFSystemVersionSnowLeopard));
        if (!allocateInline) {
            // assume that allocators give 16-byte aligned memory back -- it is their responsibility
            __CFAssignWithWriteBarrier((void **)&memory->_bytes, __CFDataAllocate(memory, __CFDataNumBytes(memory) * sizeof(uint8_t), cleared));
            if (__CFOASafe) __CFSetLastAllocationEventName(memory->_bytes, "CFData (store)");
            if (NULL == memory->_bytes) {
                CFRelease(memory);
                return NULL;
            }
        } else {
            if (length == 0 && !isMutable) {
                // NSData sets its bytes pointer to NULL when its length is zero. Starting in 10.7 we do the same for CFData.
                memory->_bytes = NULL;
                // It is important to set this data as not inlined, so we do not recalculate a bytes pointer from null.
                __CFDataSetInline(memory, false);
            }
            cleared = true;
        }
        __CFDataSetNeedsToZero(memory, !cleared);
        memory->_bytesDeallocator = NULL;
        CFDataReplaceBytes(memory, CFRangeMake(0, 0), bytes, length);
    }
    __CFSetMutableVariety(memory, __CFMutableVarietyFromFlags(flags));
    return memory;
}
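/* Sketch of the bytesDeallocator conventions __CFDataInit implements, as exercised
 * through the public no-copy creator (illustrative caller code, not CF source):
 *   NULL             -> only passed internally by the copying creators; means "copy
 *                       the bytes, no deallocator"
 *   kCFAllocatorNull -> use the bytes in place; CFData never frees them
 *   other allocator  -> use the bytes in place; free them with that allocator later */
static CFDataRef __CFWrapStaticBytes(const uint8_t *bytes, CFIndex length) {
    // Static storage must not be deallocated by CFData, so pass kCFAllocatorNull.
    return CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, bytes, length, kCFAllocatorNull);
}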
CF_INLINE void __CFArrayValidateRange(CFArrayRef array, CFRange range, const char *func) {
    CFAssert3(0 <= range.location && range.location <= CFArrayGetCount(array), __kCFLogAssertion, "%s(): range.location index (%d) out of bounds (0, %d)", func, range.location, CFArrayGetCount(array));
    CFAssert2(0 <= range.length, __kCFLogAssertion, "%s(): range.length (%d) cannot be less than zero", func, range.length);
    CFAssert3(range.location + range.length <= CFArrayGetCount(array), __kCFLogAssertion, "%s(): ending index (%d) out of bounds (0, %d)", func, range.location + range.length, CFArrayGetCount(array));
}