static void __CFAllocatorCustomDestroy(malloc_zone_t *zone) {
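    // This cast is safe because a CFAllocator is laid out to overlay
    // malloc_zone_t: the zone's leading reserved fields hold the allocator's
    // CFRuntimeBase, so the zone pointer and the allocator are the same object.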
    CFAllocatorRef allocator = (CFAllocatorRef)zone;
    // !!! we do it, and caller of malloc_destroy_zone() assumes
    // COMPLETE responsibility for the result; NO Apple library
    // code should be modified as a result of discovering that
    // some activity results in inconveniences to developers
    // trying to use malloc_destroy_zone() with a CFAllocatorRef;
    // that's just too bad for them.
    __CFAllocatorDeallocate(allocator);
}
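
// How the hook above takes effect, as a rough sketch (the destroy field is
// part of malloc_zone_t in <malloc/malloc.h>; the exact CF setup code is not
// shown here, so this wiring is illustrative only):
//
//   malloc_zone_t *zone = (malloc_zone_t *)allocator;
//   zone->destroy = __CFAllocatorCustomDestroy;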
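
// _CFRelease drops one reference from a CFTypeRef. The retain count is kept
// inline: on 64-bit builds in the dedicated 32-bit _rc field, on 32-bit
// builds in the low bits (RC_START..RC_END) of the _cfinfo word. A stored
// count of 0 marks a constant object that is never deallocated; when the
// count is 1, this release is the last one and the object is finalized and freed.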
CF_EXPORT void _CFRelease(CFTypeRef cf) {
    Boolean isAllocator = false;
#if __LP64__
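    // 64-bit: the retain count lives in its own 32-bit _rc field and is
    // updated with compare-and-swap loops so that concurrent retains and
    // releases never lose an update.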
    uint32_t lowBits;
    do {
        lowBits = ((CFRuntimeBase *)cf)->_rc;
        if (0 == lowBits) return;	// Constant CFTypeRef
        if (1 == lowBits) {
            // CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
            CFTypeID typeID = __CFGenericTypeID_inline(cf);
            isAllocator = (__kCFAllocatorTypeID_CONST == typeID);
            CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
            if (cfClass->version & _kCFRuntimeResourcefulObject && cfClass->reclaim != NULL) {
                cfClass->reclaim(cf);
            }
            void (*func)(CFTypeRef) = __CFRuntimeClassTable[typeID]->finalize;
            if (NULL != func) {
                func(cf);
            }
            // We recheck lowBits to see if the object has been retained again during
            // the finalization process.  This allows for the finalizer to resurrect,
            // but the main point is to allow finalizers to be able to manage the
            // removal of objects from uniquing caches, which may race with other threads
            // which are allocating (looking up and finding) objects from those caches,
            // which (that thread) would be the thing doing the extra retain in that case.
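            // Concretely: thread A releases the last reference to an interned
            // object while thread B finds that object in a uniquing cache and
            // retains it; the compare-and-swap below then fails (count != 1)
            // and the object survives this release.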
            if (isAllocator || _CFAtomicCompareAndSwap32Barrier(1, 0, (int32_t *)&((CFRuntimeBase *)cf)->_rc)) {
                goto really_free;
            }
        }
    } while (!_CFAtomicCompareAndSwap32Barrier(lowBits, lowBits - 1, (int32_t *)&((CFRuntimeBase *)cf)->_rc));
#else
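    // 32-bit: the retain count shares the _cfinfo word with other flags, in
    // the RC_START..RC_END bitfield. Counts too large for the field overflow
    // into the global __CFRuntimeExternRefCountTable bag; see the (1 << 7)
    // boundary handling below.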
    volatile UInt32 *infoLocation = (UInt32 *)&(((CFRuntimeBase *)cf)->_cfinfo);
    CFIndex rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
    if (__builtin_expect(0 == rcLowBits, 0)) return;        // Constant CFTypeRef
    bool success = false;
    do {
        UInt32 initialCheckInfo = *infoLocation;
        rcLowBits = __CFBitfieldGetValue(initialCheckInfo, RC_END, RC_START);
        if (__builtin_expect(1 == rcLowBits, 0)) {
            // we think cf should be deallocated
            if (__builtin_expect(__kCFAllocatorTypeID_CONST == __CFGenericTypeID_inline(cf), 0)) {
                if (__builtin_expect(__CFOASafe, 0)) __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, 0, NULL);
                __CFAllocatorDeallocate((void *)cf);
                success = 1;
            } else {
                // CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
                CFTypeID typeID = __CFGenericTypeID_inline(cf);
                CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
                if (cfClass->version & _kCFRuntimeResourcefulObject && cfClass->reclaim != NULL) {
                    cfClass->reclaim(cf);
                }
                if (NULL != __CFRuntimeClassTable[typeID]->finalize) {
                    __CFRuntimeClassTable[typeID]->finalize(cf);
                }
                // We recheck rcLowBits to see if the object has been retained again during
                // the finalization process.  This allows for the finalizer to resurrect,
                // but the main point is to allow finalizers to be able to manage the
                // removal of objects from uniquing caches, which may race with other threads
                // which are allocating (looking up and finding) objects from those caches,
                // which (that thread) would be the thing doing the extra retain in that case.
                rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
                success = (1 == rcLowBits);
                if (__builtin_expect(success, 1)) {
                    goto really_free;
                }
            }
        } else {
            // not yet junk
            UInt32 prospectiveNewInfo = initialCheckInfo; // don't want compiler to generate prospectiveNewInfo = *infoLocation.  This is why infoLocation is declared as a pointer to volatile memory.
            if (__builtin_expect((1 << 7) == rcLowBits, 0)) {
                // Time to remove a bit from the external ref count
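                // The inline bitfield has dropped to its (1 << 7) boundary
                // while external counts remain; each entry in the bag (keyed
                // by the DISGUISEd pointer) stands for a block of retains.
                // Refill the inline field and, if the swap wins, consume one
                // bag entry; the last entry leaves the count fully inline.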
                __CFSpinLock(&__CFRuntimeExternRefCountTableLock);
                CFIndex rcHighBitsCnt = CFBagGetCountOfValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
                if (1 == rcHighBitsCnt) {
                    __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, (1 << 6) - 1);
                } else {
                    __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, ((1 << 6) | (1 << 7)) - 1);
                }
                success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
                if (__builtin_expect(success, 1)) {
                    CFBagRemoveValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
                }
                __CFSpinUnlock(&__CFRuntimeExternRefCountTableLock);
            } else {
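                // Common case: no external count is involved; simply decrement
                // the inline retain count by one.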
                prospectiveNewInfo -= (1 << RC_START);
                success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
            }
        }
    } while (__builtin_expect(!success, 0));

#endif
    if (__builtin_expect(__CFOASafe, 0)) {
        __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, _CFGetRetainCount(cf), NULL);
    }
    return;

really_free:;
    if (__builtin_expect(__CFOASafe, 0)) {
        // do not use _CFGetRetainCount() because cf has been freed if it was an allocator
        __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, 0, NULL);
    }
    // cannot zombify allocators, which get deallocated by __CFAllocatorDeallocate (finalize)
    if (!isAllocator) {
        CFAllocatorRef allocator;
        Boolean usesSystemDefaultAllocator;

        // Bit 7 of the info byte is set for objects created by the system
        // default allocator, which store no allocator reference of their own.
        if (__CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 7, 7)) {
            allocator = kCFAllocatorSystemDefault;
        } else {
            allocator = CFGetAllocator(cf);
        }
        usesSystemDefaultAllocator = (allocator == kCFAllocatorSystemDefault);

        // __CFZombieLevel bits, as used below: bit 0 enables scribbling over
        // freed memory, bit 1 leaves the CFRuntimeBase header untouched, and
        // bit 7 selects a custom fill byte from bits 8-15.
        if (__CFZombieLevel & (1 << 0)) {
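            // With a non-default allocator the allocator ref is stored just
            // before the object, so back the pointer up to scribble it too.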
            uint8_t *ptr = (uint8_t *)cf - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef));
            size_t size = malloc_size(ptr);
            uint8_t byte = 0xFC;
            if (__CFZombieLevel & (1 << 1)) {
                ptr = (uint8_t *)cf + sizeof(CFRuntimeBase);
                size = size - sizeof(CFRuntimeBase) - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef));
            }
            if (__CFZombieLevel & (1 << 7)) {
                byte = (__CFZombieLevel >> 8) & 0xFF;
            }