static CFArrayRef __CFArrayInit(CFAllocatorRef allocator, UInt32 flags, CFIndex capacity, const CFArrayCallBacks *callBacks) {
    struct __CFArray *memory;
    UInt32 size;
    __CFBitfieldSetValue(flags, 31, 2, 0);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
	if (!callBacks || (callBacks->retain == NULL && callBacks->release == NULL)) {
	    __CFBitfieldSetValue(flags, 4, 4, 1); // setWeak
	}
    }
    if (__CFArrayCallBacksMatchNull(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasNullCallBacks);
    } else if (__CFArrayCallBacksMatchCFType(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasCFTypeCallBacks);
    } else {
	__CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasCustomCallBacks);
    }
    size = __CFArrayGetSizeOfType(flags) - sizeof(CFRuntimeBase);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFArrayImmutable:
	size += capacity * sizeof(struct __CFArrayBucket);
	break;
    case __kCFArrayDeque:
	break;
    }
    memory = (struct __CFArray *)_CFRuntimeCreateInstance(allocator, __kCFArrayTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFBitfieldSetValue(memory->_base._cfinfo[CF_INFO_BITS], 6, 0, flags);
    __CFArraySetCount((CFArrayRef)memory, 0);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFArrayImmutable:
        if (isWeakMemory(memory)) {  // if weak, don't scan
            auto_zone_set_unscanned(objc_collectableZone(), memory);
        }
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFArray (immutable)");
	break;
    case __kCFArrayDeque:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFArray (mutable-variable)");
	((struct __CFArray *)memory)->_mutations = 1;
	((struct __CFArray *)memory)->_mutInProgress = 0;
	((struct __CFArray *)memory)->_store = NULL;
	break;
    }
    if (__kCFArrayHasCustomCallBacks == __CFBitfieldGetValue(flags, 3, 2)) {
	CFArrayCallBacks *cb = (CFArrayCallBacks *)__CFArrayGetCallBacks((CFArrayRef)memory);
	*cb = *callBacks;
	FAULT_CALLBACK((void **)&(cb->retain));
	FAULT_CALLBACK((void **)&(cb->release));
	FAULT_CALLBACK((void **)&(cb->copyDescription));
	FAULT_CALLBACK((void **)&(cb->equal));
    }
    return (CFArrayRef)memory;
}
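Throughout these snippets, flag fields are packed and unpacked with CF's bitfield macros. For reference, a minimal standalone sketch using the macro definitions as they appear in CFInternal.h (reproduced on that assumption; the concrete field values below are illustrative, not the real enum constants):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t UInt32;

/* Bitfield helpers as defined in CFInternal.h: N1 is the high bit index,
   N2 the low bit index, both inclusive. */
#define __CFBitfieldMask(N1, N2)	((((UInt32)~0UL) << (31UL - (N1) + (N2))) >> (31UL - (N1)))
#define __CFBitfieldGetValue(V, N1, N2)	(((V) & __CFBitfieldMask(N1, N2)) >> (N2))
#define __CFBitfieldSetValue(V, N1, N2, X)	((V) = ((V) & ~__CFBitfieldMask(N1, N2)) | (((X) << (N2)) & __CFBitfieldMask(N1, N2)))

int main(void) {
    UInt32 flags = 0;
    __CFBitfieldSetValue(flags, 1, 0, 1);   /* bits 0-1: variety (illustrative value) */
    __CFBitfieldSetValue(flags, 3, 2, 3);   /* bits 2-3: callback kind (illustrative value) */
    __CFBitfieldSetValue(flags, 4, 4, 1);   /* bit 4: weak (GC) storage */
    printf("variety=%u callbacks=%u weak=%u\n",
           __CFBitfieldGetValue(flags, 1, 0),
           __CFBitfieldGetValue(flags, 3, 2),
           __CFBitfieldGetValue(flags, 4, 4));   /* prints: variety=1 callbacks=3 weak=1 */
    return 0;
}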
// Example #2
CF_INLINE CFIndex __CFSetGetSizeOfType(CFIndex t) {
    CFIndex size = sizeof(struct __CFSet);
    if (__CFBitfieldGetValue(t, 3, 2) == __kCFSetHasCustomCallBacks) {
	size += sizeof(CFSetCallBacks);
    }
    return size;
}
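The size computed here reserves room for custom callbacks directly behind the fixed header; __CFSetGetCallBacks (Example #4 below) recovers them at the same offset. A minimal sketch of this variable-length-header pattern, using purely illustrative names (Header, Callbacks, header_create are not CF API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int retain, release; } Callbacks;   /* illustrative */
typedef struct { unsigned flags; } Header;           /* illustrative */

/* One allocation: fixed header, then optional trailing callbacks. */
static Header *header_create(const Callbacks *cb) {
    size_t size = sizeof(Header) + (cb ? sizeof(Callbacks) : 0);
    Header *h = malloc(size);
    h->flags = cb ? 1 : 0;   /* bit 0: has trailing callbacks */
    if (cb) memcpy((uint8_t *)h + sizeof(Header), cb, sizeof(Callbacks));
    return h;
}

/* Recover the trailing callbacks at a fixed offset, as __CFSetGetCallBacks does. */
static const Callbacks *header_callbacks(const Header *h) {
    return (h->flags & 1) ? (const Callbacks *)((const uint8_t *)h + sizeof(Header)) : NULL;
}

int main(void) {
    Callbacks cb = { 1, 2 };
    Header *h = header_create(&cb);
    printf("retain=%d\n", header_callbacks(h)->retain);   /* prints: retain=1 */
    free(h);
    return 0;
}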
// Example #3
CF_EXPORT CFTypeRef _CFRetain(CFTypeRef cf) {
    if (NULL == cf) return NULL;
#if __LP64__
    uint32_t lowBits;
    do {
        lowBits = ((CFRuntimeBase *)cf)->_rc;
        if (0 == lowBits) return cf;	// Constant CFTypeRef
    } while (!_CFAtomicCompareAndSwap32Barrier(lowBits, lowBits + 1, (int32_t *)&((CFRuntimeBase *)cf)->_rc));
#else
#define RC_START 24
#define RC_END 31
    volatile UInt32 *infoLocation = (UInt32 *)&(((CFRuntimeBase *)cf)->_cfinfo);
    CFIndex rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
    if (__builtin_expect(0 == rcLowBits, 0)) return cf;	// Constant CFTypeRef
    bool success = 0;
    do {
        UInt32 initialCheckInfo = *infoLocation;
        UInt32 prospectiveNewInfo = initialCheckInfo; // don't want compiler to generate prospectiveNewInfo = *infoLocation.  This is why infoLocation is declared as a pointer to volatile memory.
        prospectiveNewInfo += (1 << RC_START);
        rcLowBits = __CFBitfieldGetValue(prospectiveNewInfo, RC_END, RC_START);
        if (__builtin_expect((rcLowBits & 0x7f) == 0, 0)) {
            /* Roll over another bit to the external ref count.
               Real ref count = low 7 bits of info[CF_RC_BITS] + external ref count << 6.
               The top bit of the 8-bit field (1 << 7) indicates that the external ref
               count is in use. The external ref count is shifted by 6 rather than 7 so
               that the low bits can be set to 1100 0000 rather than 1000 0000; this
               avoids touching the external ref count on successive retains and releases
               when the composite retain count sits right around a multiple of 1 << 7. */
            prospectiveNewInfo = initialCheckInfo;
            __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, ((1 << 7) | (1 << 6)));
            __CFSpinLock(&__CFRuntimeExternRefCountTableLock);
            success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
            if (__builtin_expect(success, 1)) {
                CFBagAddValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
            }
            __CFSpinUnlock(&__CFRuntimeExternRefCountTableLock);
        } else {
            success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
        }
    } while (__builtin_expect(!success, 0));
#endif
    if (__builtin_expect(__CFOASafe, 0)) {
        __CFRecordAllocationEvent(__kCFRetainEvent, (void *)cf, 0, _CFGetRetainCount(cf), NULL);
    }
    return cf;
}
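On 32-bit, the rollover comment above encodes the composite retain count as the low 7 bits of the inline field plus the external (CFBag) count shifted left by 6. A worked model of that arithmetic; composite_retain_count is an illustrative name, not a CF function:

#include <stdio.h>

/* Composite count = (inline RC bits & 0x7f) + (bag entries << 6); bit 7 of
   the inline field marks that the external count is in use. */
static unsigned composite_retain_count(unsigned rcBits, unsigned bagEntries) {
    return (rcBits & 0x7f) + (bagEntries << 6);
}

int main(void) {
    /* Just before rollover: inline field holds 0x7f, no bag entry. */
    printf("%u\n", composite_retain_count(0x7f, 0));                 /* 127 */
    /* After the rollover in _CFRetain: inline bits become 1100 0000
       and one bag entry is added, yielding 64 + 64 = 128. */
    printf("%u\n", composite_retain_count((1 << 7) | (1 << 6), 1));  /* 128 */
    return 0;
}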
// Example #4
CF_INLINE const CFSetCallBacks *__CFSetGetCallBacks(CFSetRef set) {
    CFSetCallBacks *result = NULL;
    switch (__CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 3, 2)) {
    case __kCFSetHasNullCallBacks:
	return &__kCFNullSetCallBacks;
    case __kCFSetHasCFTypeCallBacks:
	return &kCFTypeSetCallBacks;
    case __kCFSetHasCustomCallBacks:
	break;
    }
    result = (CFSetCallBacks *)((uint8_t *)set + sizeof(struct __CFSet));
    return result;
}
// Example #5
CFSetRef CFSetCreate(CFAllocatorRef allocator, const void **values, CFIndex numValues, const CFSetCallBacks *callBacks) {
    CFSetRef result;
    UInt32 flags;
    CFIndex idx;
    CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);
    result = __CFSetInit(allocator, __kCFSetImmutable, numValues, callBacks);
    flags = __CFBitfieldGetValue(((const CFRuntimeBase *)result)->_info, 1, 0);
    if (flags == __kCFSetImmutable) {
        // Temporarily mark the set fixed-mutable so the CFSetAddValue calls below
        // are permitted; the original variety bits are restored after the loop.
        __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, __kCFSetFixedMutable);
    }
    for (idx = 0; idx < numValues; idx++) {
	CFSetAddValue((CFMutableSetRef)result, values[idx]);
    }
    __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, flags);
    return result;
}
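CFSetCreate is public API, so the snippet above can be exercised directly; a minimal caller (build on macOS with cc demo.c -framework CoreFoundation). Duplicate values collapse, and kCFTypeSetCallBacks retains each value:

#include <CoreFoundation/CoreFoundation.h>
#include <stdio.h>

int main(void) {
    const void *values[] = { CFSTR("a"), CFSTR("b"), CFSTR("a") };
    CFSetRef set = CFSetCreate(kCFAllocatorDefault, values, 3, &kCFTypeSetCallBacks);
    printf("count = %ld\n", (long)CFSetGetCount(set));   /* prints: count = 2 */
    CFRelease(set);
    return 0;
}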
CF_PRIVATE CFArrayCallBacks *__CFArrayGetCallBacks(CFArrayRef array) {
    CFArrayCallBacks *result = NULL;
    switch (__CFBitfieldGetValue(((const CFRuntimeBase *)array)->_cfinfo[CF_INFO_BITS], 3, 2)) {
    case __kCFArrayHasNullCallBacks:
	return (CFArrayCallBacks *)&__kCFNullArrayCallBacks;
    case __kCFArrayHasCFTypeCallBacks:
	return (CFArrayCallBacks *)&kCFTypeArrayCallBacks;
    case __kCFArrayHasCustomCallBacks:
	break;
    }
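    /* Both varieties store custom callbacks immediately after struct __CFArray,
       so the two cases below intentionally compute the same address. */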
    switch (__CFArrayGetType(array)) {
    case __kCFArrayImmutable:
	result = (CFArrayCallBacks *)((uint8_t *)array + sizeof(struct __CFArray));
	break;
    case __kCFArrayDeque:
	result = (CFArrayCallBacks *)((uint8_t *)array + sizeof(struct __CFArray));
	break;
    }
    return result;
}
// Example #7
static CFSetRef __CFSetInit(CFAllocatorRef allocator, UInt32 flags, CFIndex capacity, const CFSetCallBacks *callBacks) {
    struct __CFSet *memory;
    UInt32 size;
    CFIndex idx;
    __CFBitfieldSetValue(flags, 31, 2, 0);
    if (__CFSetCallBacksMatchNull(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFSetHasNullCallBacks);
    } else if (__CFSetCallBacksMatchCFType(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFSetHasCFTypeCallBacks);
    } else {
	__CFBitfieldSetValue(flags, 3, 2, __kCFSetHasCustomCallBacks);
    }
    size = __CFSetGetSizeOfType(flags) - sizeof(CFRuntimeBase);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFSetImmutable:
    case __kCFSetFixedMutable:
	size += __CFSetNumBucketsForCapacity(capacity) * sizeof(struct __CFSetBucket);
	break;
    case __kCFSetMutable:
	break;
    }
    memory = (struct __CFSet *)_CFRuntimeCreateInstance(allocator, __kCFSetTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFBitfieldSetValue(memory->_base._info, 6, 0, flags);
    memory->_count = 0;
    memory->_bucketsUsed = 0;
    memory->_emptyMarker = (const void *)0xa1b1c1d3;
    memory->_deletedMarker = (const void *)0xa1b1c1d5;
    memory->_context = NULL;
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFSetImmutable:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (immutable)");
	memory->_capacity = capacity;	/* Don't round up capacity */
	memory->_bucketsNum = __CFSetNumBucketsForCapacity(memory->_capacity);
	memory->_buckets = (struct __CFSetBucket *)((uint8_t *)memory + __CFSetGetSizeOfType(flags));
	for (idx = memory->_bucketsNum; idx--;) {
	    memory->_buckets[idx]._key = memory->_emptyMarker;
	}
	break;
    case __kCFSetFixedMutable:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (mutable-fixed)");
	memory->_capacity = capacity;	/* Don't round up capacity */
	memory->_bucketsNum = __CFSetNumBucketsForCapacity(memory->_capacity);
	memory->_buckets = (struct __CFSetBucket *)((uint8_t *)memory + __CFSetGetSizeOfType(flags));
	for (idx = memory->_bucketsNum; idx--;) {
	    memory->_buckets[idx]._key = memory->_emptyMarker;
	}
	break;
    case __kCFSetMutable:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (mutable-variable)");
	memory->_capacity = __CFSetRoundUpCapacity(1);
	memory->_bucketsNum = 0;
	memory->_buckets = NULL;
	break;
    }
    if (__kCFSetHasCustomCallBacks == __CFBitfieldGetValue(flags, 3, 2)) {
	const CFSetCallBacks *cb = __CFSetGetCallBacks((CFSetRef)memory);
	*(CFSetCallBacks *)cb = *callBacks;
	FAULT_CALLBACK((void **)&(cb->retain));
	FAULT_CALLBACK((void **)&(cb->release));
	FAULT_CALLBACK((void **)&(cb->copyDescription));
	FAULT_CALLBACK((void **)&(cb->equal));
	FAULT_CALLBACK((void **)&(cb->hash));
    }
    return (CFSetRef)memory;
}
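For the immutable and fixed-mutable varieties above, the buckets live in the same allocation as the header and every bucket is primed with the empty marker. A stripped-down sketch of that layout, under illustrative names (FixedSet, Bucket, fixed_set_create are not CF internals):

#include <stdint.h>
#include <stdlib.h>

#define EMPTY_MARKER ((const void *)0xa1b1c1d3)

typedef struct { const void *key; } Bucket;
typedef struct { long capacity, bucketsNum; Bucket *buckets; } FixedSet;

/* One allocation holding the header followed by the bucket array. */
static FixedSet *fixed_set_create(long numBuckets) {
    FixedSet *s = malloc(sizeof(FixedSet) + numBuckets * sizeof(Bucket));
    s->capacity = numBuckets;
    s->bucketsNum = numBuckets;
    s->buckets = (Bucket *)((uint8_t *)s + sizeof(FixedSet));
    for (long i = s->bucketsNum; i--;)   /* same countdown idiom as above */
        s->buckets[i].key = EMPTY_MARKER;
    return s;
}

int main(void) {
    FixedSet *s = fixed_set_create(7);
    free(s);
    return 0;
}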
CF_INLINE Boolean __CFMessagePortExtraMachRef(CFMessagePortRef ms) {
    return (Boolean)__CFBitfieldGetValue(((const CFRuntimeBase *)ms)->_cfinfo[CF_INFO_BITS], 1, 1);
}
CF_INLINE UInt32 __CFMutableVariety(const void *cf) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 1, 0);
}
// Example #10
CF_INLINE UInt32 __CFBinaryHeapMutableVarietyFromFlags(UInt32 flags) {
    return __CFBitfieldGetValue(flags, 1, 0);
}
// Example #11
CF_INLINE Boolean __CFMessagePortIsDeallocing(CFMessagePortRef ms) {
    return (Boolean)__CFBitfieldGetValue(((const CFRuntimeBase *)ms)->_info, 3, 3);
}
// Example #12
CF_EXPORT void _CFRelease(CFTypeRef cf) {
    Boolean isAllocator = false;
#if __LP64__
    uint32_t lowBits;
    do {
        lowBits = ((CFRuntimeBase *)cf)->_rc;
        if (0 == lowBits) return;	// Constant CFTypeRef
        if (1 == lowBits) {
            // CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
            CFTypeID typeID = __CFGenericTypeID_inline(cf);
            isAllocator = (__kCFAllocatorTypeID_CONST == typeID);
            CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
            if (cfClass->version & _kCFRuntimeResourcefulObject && cfClass->reclaim != NULL) {
                cfClass->reclaim(cf);
            }
            void (*func)(CFTypeRef) = cfClass->finalize;
            if (NULL != func) {
                func(cf);
            }
            // We recheck lowBits to see if the object has been retained again during
            // the finalization process.  This allows for the finalizer to resurrect,
            // but the main point is to allow finalizers to be able to manage the
            // removal of objects from uniquing caches, which may race with other threads
            // which are allocating (looking up and finding) objects from those caches,
            // which (that thread) would be the thing doing the extra retain in that case.
            if (isAllocator || _CFAtomicCompareAndSwap32Barrier(1, 0, (int32_t *)&((CFRuntimeBase *)cf)->_rc)) {
                goto really_free;
            }
        }
    } while (!_CFAtomicCompareAndSwap32Barrier(lowBits, lowBits - 1, (int32_t *)&((CFRuntimeBase *)cf)->_rc));
#else
    volatile UInt32 *infoLocation = (UInt32 *)&(((CFRuntimeBase *)cf)->_cfinfo);
    CFIndex rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
    if (__builtin_expect(0 == rcLowBits, 0)) return;        // Constant CFTypeRef
    bool success = 0;
    do {
        UInt32 initialCheckInfo = *infoLocation;
        rcLowBits = __CFBitfieldGetValue(initialCheckInfo, RC_END, RC_START);
        if (__builtin_expect(1 == rcLowBits, 0)) {
            // we think cf should be deallocated
            if (__builtin_expect(__kCFAllocatorTypeID_CONST == __CFGenericTypeID_inline(cf), 0)) {
                if (__builtin_expect(__CFOASafe, 0)) __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, 0, NULL);
                __CFAllocatorDeallocate((void *)cf);
                success = 1;
            } else {
                // CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
                CFTypeID typeID = __CFGenericTypeID_inline(cf);
                CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
                if (cfClass->version & _kCFRuntimeResourcefulObject && cfClass->reclaim != NULL) {
                    cfClass->reclaim(cf);
                }
                if (NULL != cfClass->finalize) {
                    cfClass->finalize(cf);
                }
                // We recheck rcLowBits to see if the object has been retained again during
                // the finalization process.  This allows for the finalizer to resurrect,
                // but the main point is to allow finalizers to be able to manage the
                // removal of objects from uniquing caches, which may race with other threads
                // which are allocating (looking up and finding) objects from those caches,
                // which (that thread) would be the thing doing the extra retain in that case.
                rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
                success = (1 == rcLowBits);
                if (__builtin_expect(success, 1)) {
                    goto really_free;
                }
            }
        } else {
            // not yet junk
            UInt32 prospectiveNewInfo = initialCheckInfo; // don't want compiler to generate prospectiveNewInfo = *infoLocation.  This is why infoLocation is declared as a pointer to volatile memory.
            if (__builtin_expect((1 << 7) == rcLowBits, 0)) {
                // Time to remove a bit from the external ref count
                __CFSpinLock(&__CFRuntimeExternRefCountTableLock);
                CFIndex rcHighBitsCnt = CFBagGetCountOfValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
                if (1 == rcHighBitsCnt) {
                    __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, (1 << 6) - 1);
                } else {
                    __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, ((1 << 6) | (1 << 7)) - 1);
                }
                success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
                if (__builtin_expect(success, 1)) {
                    CFBagRemoveValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
                }
                __CFSpinUnlock(&__CFRuntimeExternRefCountTableLock);
            } else {
                prospectiveNewInfo -= (1 << RC_START);
                success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
            }
        }
    } while (__builtin_expect(!success, 0));

#endif
    if (__builtin_expect(__CFOASafe, 0)) {
        __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, _CFGetRetainCount(cf), NULL);
    }
    return;

really_free:;
    if (__builtin_expect(__CFOASafe, 0)) {
        // do not use _CFGetRetainCount() because cf has been freed if it was an allocator
        __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, 0, NULL);
    }
    // cannot zombify allocators, which get deallocated by __CFAllocatorDeallocate (finalize)
    if (!isAllocator) {
        CFAllocatorRef allocator;
        Boolean usesSystemDefaultAllocator;

        if (__CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 7, 7)) {
            allocator = kCFAllocatorSystemDefault;
        } else {
            allocator = CFGetAllocator(cf);
        }
        usesSystemDefaultAllocator = (allocator == kCFAllocatorSystemDefault);

        if (__CFZombieLevel & (1 << 0)) {
            uint8_t *ptr = (uint8_t *)cf - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef));
            size_t size = malloc_size(ptr);
            uint8_t byte = 0xFC;
            if (__CFZombieLevel & (1 << 1)) {
                ptr = (uint8_t *)cf + sizeof(CFRuntimeBase);
                size = size - sizeof(CFRuntimeBase) - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef));
            }
            if (__CFZombieLevel & (1 << 7)) {
                byte = (__CFZombieLevel >> 8) & 0xFF;
            }
            memset(ptr, byte, size);
        }
        /* The snippet was truncated here; the tail below restores the usual
           deallocation path from the matching CFRuntime.c (an assumption). */
        if (!(__CFZombieLevel & (1 << 4))) {
            CFAllocatorDeallocate(allocator, (uint8_t *)cf - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef)));
        }
        if (kCFAllocatorSystemDefault != allocator) {
            CFRelease(allocator);
        }
    }
}
// Example #13
CF_INLINE uint32_t __CFTreeGetCallBacksType(CFTreeRef tree) {
    return (__CFBitfieldGetValue(tree->_base._cfinfo[CF_INFO_BITS], 1, 0));
}
// Example #14
CF_INLINE CFIndex __CFLocaleGetType(CFLocaleRef locale) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)locale)->_cfinfo[CF_INFO_BITS], 1, 0);
}
CF_INLINE bool isWeakMemory(CFTypeRef collection) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)collection)->_cfinfo[CF_INFO_BITS], 4, 4) != 0;
}
CF_INLINE Boolean __CFWindowsMessageQueueIsValid(CFWindowsMessageQueueRef wmq) {
    return (Boolean)__CFBitfieldGetValue(((const CFRuntimeBase *)wmq)->_cfinfo[CF_INFO_BITS], 3, 3);
}
CF_INLINE bool hasBeenFinalized(CFTypeRef collection) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)collection)->_cfinfo[CF_INFO_BITS], 5, 5) != 0;
}
// Example #18
CF_INLINE Boolean __CFMessagePortIsRemote(CFMessagePortRef ms) {
    return (Boolean)__CFBitfieldGetValue(((const CFRuntimeBase *)ms)->_info, 2, 2);
}
CF_INLINE CFIndex __CFArrayGetType(CFArrayRef array) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)array)->_cfinfo[CF_INFO_BITS], 1, 0);
}
// Example #20
CF_INLINE bool isStrongMemory_Heap(CFTypeRef collection) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)collection)->_cfinfo[CF_INFO_BITS], 4, 4) == 0;
}
// Example #21
CF_INLINE CFIndex __CFSetGetType(CFSetRef set) {
    return __CFBitfieldGetValue(((const CFRuntimeBase *)set)->_info, 1, 0);
}
CF_INLINE Boolean __CFDataNeedsToZero(CFDataRef data) {
    return __CFBitfieldGetValue(((CFRuntimeBase *)data)->_cfinfo[CF_INFO_BITS], 6, 6);
}
CF_INLINE Boolean __CFMessagePortIsValid(CFMessagePortRef ms) {
    return (Boolean)__CFBitfieldGetValue(((const CFRuntimeBase *)ms)->_cfinfo[CF_INFO_BITS], 0, 0);
}