Example #1
CFSetRef CFSetCreate(CFAllocatorRef allocator, const void **values, CFIndex numValues, const CFSetCallBacks *callBacks) {
    CFSetRef result;
    UInt32 flags;
    CFIndex idx;
    CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);
    result = __CFSetInit(allocator, __kCFSetImmutable, numValues, callBacks);
    flags = __CFBitfieldGetValue(((const CFRuntimeBase *)result)->_info, 1, 0);
    if (flags == __kCFSetImmutable) {
        __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, __kCFSetFixedMutable);
    }
    for (idx = 0; idx < numValues; idx++) {
	CFSetAddValue((CFMutableSetRef)result, values[idx]);
    }
    __CFBitfieldSetValue(((CFRuntimeBase *)result)->_info, 1, 0, flags);
    return result;
}
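All of these examples lean on the bitfield helpers from CFInternal.h. For reference, here is a sketch of those macros as they appear in the open-source CF headers (N1 is the high bit of the field, N2 the low bit, both inclusive):

#define __CFBitfieldMask(N1, N2)        ((((UInt32)~0UL) << (31UL - (N1) + (N2))) >> (31UL - (N1)))
#define __CFBitfieldGetValue(V, N1, N2) (((V) & __CFBitfieldMask(N1, N2)) >> (N2))
#define __CFBitfieldSetValue(V, N1, N2, X) \
    ((V) = ((V) & ~__CFBitfieldMask(N1, N2)) | (((X) << (N2)) & __CFBitfieldMask(N1, N2)))

So the calls above with arguments (1, 0) read and write the two low "variety" bits of the info word: CFSetCreate briefly flips an immutable set to __kCFSetFixedMutable so that CFSetAddValue will accept values, then restores the immutable marker.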
Example #2
__private_extern__ CFArrayRef __CFArrayCreateTransfer(CFAllocatorRef allocator, const void **values, CFIndex numValues) {
    CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);
    UInt32 flags = __kCFArrayImmutable;
    __CFBitfieldSetValue(flags, 31, 2, 0);
    __CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasCFTypeCallBacks);
    UInt32 size = __CFArrayGetSizeOfType(flags) - sizeof(CFRuntimeBase);
    size += numValues * sizeof(struct __CFArrayBucket);
    struct __CFArray *memory = (struct __CFArray*)_CFRuntimeCreateInstance(allocator, __kCFArrayTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFBitfieldSetValue(memory->_base._cfinfo[CF_INFO_BITS], 6, 0, flags);
    __CFArraySetCount(memory, numValues);
    memmove(__CFArrayGetBucketsPtr(memory), values, sizeof(void *) * numValues);
    if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFArray (immutable)");
    return (CFArrayRef)memory;
}
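The transfer variant adopts the caller's references: the value pointers are memmoved into the buckets without being retained. The public counterpart retains each value, so the caller keeps ownership; a minimal sketch of the difference:

#include <CoreFoundation/CoreFoundation.h>

int main(void) {
    CFStringRef s = CFStringCreateWithCString(kCFAllocatorDefault, "hello", kCFStringEncodingUTF8);
    const void *vals[1] = { s };
    // CFArrayCreate retains each value, so the caller still owns (and must release) s.
    CFArrayRef arr = CFArrayCreate(kCFAllocatorDefault, vals, 1, &kCFTypeArrayCallBacks);
    CFRelease(s);        // safe: the array holds its own retain
    // __CFArrayCreateTransfer, by contrast, would have adopted the reference,
    // and releasing s here would over-release.
    CFRelease(arr);
    return 0;
}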
Example #3
static void __CFSetDeallocate(CFTypeRef cf) {
    CFMutableSetRef set = (CFMutableSetRef)cf;
    CFAllocatorRef allocator = __CFGetAllocator(set);
    if (__CFSetGetType(set) == __kCFSetImmutable) {
        __CFBitfieldSetValue(((CFRuntimeBase *)set)->_info, 1, 0, __kCFSetFixedMutable);
    }
    CFSetRemoveAllValues(set);
    if (__CFSetGetType(set) == __kCFSetMutable && set->_buckets) {
	CFAllocatorDeallocate(allocator, set->_buckets);
    }
}
Example #4
static CFBinaryHeapRef __CFBinaryHeapInit(CFAllocatorRef allocator, UInt32 flags, CFIndex capacity, const void **values, CFIndex numValues, const CFBinaryHeapCallBacks *callBacks, const CFBinaryHeapCompareContext *compareContext) {
    CFBinaryHeapRef memory;
    CFIndex idx;
    CFIndex size;

    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);
    size = sizeof(struct __CFBinaryHeap) - sizeof(CFRuntimeBase);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
	if (!callBacks || (callBacks->retain == NULL && callBacks->release == NULL)) {
	    __CFBitfieldSetValue(flags, 4, 4, 1); // setWeak
	}
    }

    memory = (CFBinaryHeapRef)_CFRuntimeCreateInstance(allocator, __kCFBinaryHeapTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFBinaryHeapSetCapacity(memory, __CFBinaryHeapRoundUpCapacity(1));
    __CFBinaryHeapSetNumBuckets(memory, __CFBinaryHeapNumBucketsForCapacity(__CFBinaryHeapRoundUpCapacity(1)));
    void *buckets = _CFAllocatorAllocateGC(allocator, __CFBinaryHeapNumBuckets(memory) * sizeof(struct __CFBinaryHeapBucket), isStrongMemory_Heap(memory) ? __kCFAllocatorGCScannedMemory : 0);
    __CFAssignWithWriteBarrier((void **)&memory->_buckets, buckets);
    if (__CFOASafe) __CFSetLastAllocationEventName(memory->_buckets, "CFBinaryHeap (store)");
    if (NULL == memory->_buckets) {
        CFRelease(memory);
        return NULL;
    }
    __CFBinaryHeapSetNumBucketsUsed(memory, 0);
    __CFBinaryHeapSetCount(memory, 0);
    if (NULL != callBacks) {
	memory->_callbacks.retain = callBacks->retain;
	memory->_callbacks.release = callBacks->release;
	memory->_callbacks.copyDescription = callBacks->copyDescription;
	memory->_callbacks.compare = callBacks->compare;
    } else {
	memory->_callbacks.retain = 0;
	memory->_callbacks.release = 0;
	memory->_callbacks.copyDescription = 0;
	memory->_callbacks.compare = 0;
    }
    if (compareContext) memcpy(&memory->_context, compareContext, sizeof(CFBinaryHeapCompareContext));
// CF: retain info for proper operation
    __CFBinaryHeapSetMutableVariety(memory, kCFBinaryHeapMutable);
    for (idx = 0; idx < numValues; idx++) {
	CFBinaryHeapAddValue(memory, values[idx]);
    }
    __CFBinaryHeapSetMutableVariety(memory, __CFBinaryHeapMutableVarietyFromFlags(flags));
    return memory;
}
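The public entry point CFBinaryHeapCreate funnels into this initializer; a small usage sketch with the predefined string callbacks:

#include <CoreFoundation/CoreFoundation.h>

int main(void) {
    CFBinaryHeapRef heap = CFBinaryHeapCreate(kCFAllocatorDefault, 0,
                                              &kCFStringBinaryHeapCallBacks, NULL);
    CFBinaryHeapAddValue(heap, CFSTR("pear"));
    CFBinaryHeapAddValue(heap, CFSTR("apple"));
    // The heap keeps the smallest value (per the compare callback) on top.
    CFShow(CFBinaryHeapGetMinimum(heap));   // apple
    CFRelease(heap);
    return 0;
}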
Example #5
CF_EXPORT CFTypeRef _CFRetain(CFTypeRef cf) {
    if (NULL == cf) return NULL;
#if __LP64__
    uint32_t lowBits;
    do {
        lowBits = ((CFRuntimeBase *)cf)->_rc;
        if (0 == lowBits) return cf;	// Constant CFTypeRef
    } while (!_CFAtomicCompareAndSwap32Barrier(lowBits, lowBits + 1, (int32_t *)&((CFRuntimeBase *)cf)->_rc));
#else
#define RC_START 24
#define RC_END 31
    volatile UInt32 *infoLocation = (UInt32 *)&(((CFRuntimeBase *)cf)->_cfinfo);
    CFIndex rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
    if (__builtin_expect(0 == rcLowBits, 0)) return cf;	// Constant CFTypeRef
    bool success = 0;
    do {
        UInt32 initialCheckInfo = *infoLocation;
        UInt32 prospectiveNewInfo = initialCheckInfo; // don't want compiler to generate prospectiveNewInfo = *infoLocation.  This is why infoLocation is declared as a pointer to volatile memory.
        prospectiveNewInfo += (1 << RC_START);
        rcLowBits = __CFBitfieldGetValue(prospectiveNewInfo, RC_END, RC_START);
        if (__builtin_expect((rcLowBits & 0x7f) == 0, 0)) {
            /* Roll over another bit to the external ref count.
               Real ref count = low 7 bits of info[CF_RC_BITS] + external ref count << 6.
               Bit 8 of the low bits indicates that the external ref count is in use.
               The external ref count is shifted by 6 rather than 7 so that we can set
               the low bits to 1100 0000 rather than 1000 0000. This prevents needing
               to access the external ref count for successive retains and releases
               when the composite retain count is right around a multiple of 1 << 7. */
            prospectiveNewInfo = initialCheckInfo;
            __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, ((1 << 7) | (1 << 6)));
            __CFSpinLock(&__CFRuntimeExternRefCountTableLock);
            success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
            if (__builtin_expect(success, 1)) {
                CFBagAddValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
            }
            __CFSpinUnlock(&__CFRuntimeExternRefCountTableLock);
        } else {
            success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
        }
    } while (__builtin_expect(!success, 0));
#endif
    if (__builtin_expect(__CFOASafe, 0)) {
        __CFRecordAllocationEvent(__kCFRetainEvent, (void *)cf, 0, _CFGetRetainCount(cf), NULL);
    }
    return cf;
}
Example #6
static CFArrayRef __CFArrayInit(CFAllocatorRef allocator, UInt32 flags, CFIndex capacity, const CFArrayCallBacks *callBacks) {
    struct __CFArray *memory;
    UInt32 size;
    __CFBitfieldSetValue(flags, 31, 2, 0);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
	if (!callBacks || (callBacks->retain == NULL && callBacks->release == NULL)) {
	    __CFBitfieldSetValue(flags, 4, 4, 1); // setWeak
	}
    }
    if (__CFArrayCallBacksMatchNull(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasNullCallBacks);
    } else if (__CFArrayCallBacksMatchCFType(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasCFTypeCallBacks);
    } else {
	__CFBitfieldSetValue(flags, 3, 2, __kCFArrayHasCustomCallBacks);
    }
    size = __CFArrayGetSizeOfType(flags) - sizeof(CFRuntimeBase);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFArrayImmutable:
	size += capacity * sizeof(struct __CFArrayBucket);
	break;
    case __kCFArrayDeque:
	break;
    }
    memory = (struct __CFArray*)_CFRuntimeCreateInstance(allocator, __kCFArrayTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFBitfieldSetValue(memory->_base._cfinfo[CF_INFO_BITS], 6, 0, flags);
    __CFArraySetCount((CFArrayRef)memory, 0);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFArrayImmutable:
        if (isWeakMemory(memory)) {  // if weak, don't scan
            auto_zone_set_unscanned(objc_collectableZone(), memory);
        }
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFArray (immutable)");
	break;
    case __kCFArrayDeque:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFArray (mutable-variable)");
	((struct __CFArray *)memory)->_mutations = 1;
	((struct __CFArray *)memory)->_mutInProgress = 0;
	((struct __CFArray*)memory)->_store = NULL;
	break;
    }
    if (__kCFArrayHasCustomCallBacks == __CFBitfieldGetValue(flags, 3, 2)) {
	CFArrayCallBacks *cb = (CFArrayCallBacks *)__CFArrayGetCallBacks((CFArrayRef)memory);
	*cb = *callBacks;
	FAULT_CALLBACK((void **)&(cb->retain));
	FAULT_CALLBACK((void **)&(cb->release));
	FAULT_CALLBACK((void **)&(cb->copyDescription));
	FAULT_CALLBACK((void **)&(cb->equal));
    }
    return (CFArrayRef)memory;
}
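Passing callbacks that match neither the null nor the CFType set drives the __kCFArrayHasCustomCallBacks branch above, and the callback struct is copied into the instance. A hypothetical usage sketch with no-op memory management, so raw (non-CF) pointers can be stored:

#include <CoreFoundation/CoreFoundation.h>

// No-op callbacks: the array neither retains nor releases its values.
static const void *noopRetain(CFAllocatorRef a, const void *v) { return v; }
static void noopRelease(CFAllocatorRef a, const void *v) {}

int main(void) {
    CFArrayCallBacks cb = { 0, noopRetain, noopRelease, NULL, NULL };
    CFMutableArrayRef arr = CFArrayCreateMutable(kCFAllocatorDefault, 0, &cb);
    int x = 42;
    CFArrayAppendValue(arr, &x);    // stores the raw pointer, no CF retain
    CFRelease(arr);
    return 0;
}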
Example #7
CFTreeRef CFTreeCreate(CFAllocatorRef allocator, const CFTreeContext *context) {
    CFTreeRef memory;
    uint32_t size;

    CFAssert1(NULL != context, __kCFLogAssertion, "%s(): pointer to context may not be NULL", __PRETTY_FUNCTION__);
    CFAssert1(0 == context->version, __kCFLogAssertion, "%s(): context version not initialized to 0", __PRETTY_FUNCTION__);
    size = sizeof(struct __CFTree) - sizeof(CFRuntimeBase);
    memory = (CFTreeRef)_CFRuntimeCreateInstance(allocator, __kCFTreeTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    memory->_parent = NULL;
    memory->_sibling = NULL;
    memory->_child = NULL;
    memory->_rightmostChild = NULL;

    /* Start the context off in a recognizable state */
    __CFBitfieldSetValue(memory->_base._cfinfo[CF_INFO_BITS], 1, 0, __kCFTreeHasNullCallBacks);
    CFTreeSetContext(memory, context);
    return memory;
}
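A usage sketch for the public API; note the context's version field must be 0, as the assertion above enforces:

#include <CoreFoundation/CoreFoundation.h>

int main(void) {
    int payload = 7;                                  // arbitrary client data
    CFTreeContext ctx = { 0, &payload, NULL, NULL, NULL };
    CFTreeRef root = CFTreeCreate(kCFAllocatorDefault, &ctx);
    CFTreeRef child = CFTreeCreate(kCFAllocatorDefault, &ctx);
    CFTreeAppendChild(root, child);
    CFRelease(child);                                 // the tree retains its children
    CFRelease(root);
    return 0;
}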
Example #8
void CFTreeSetContext(CFTreeRef tree, const CFTreeContext *context) {
    uint32_t newtype, oldtype = __CFTreeGetCallBacksType(tree);
    struct __CFTreeCallBacks *oldcb = (struct __CFTreeCallBacks *)__CFTreeGetCallBacks(tree);
    struct __CFTreeCallBacks *newcb;
    void *oldinfo = tree->_info;
    CFAllocatorRef allocator = CFGetAllocator(tree);
    
    if (__CFTreeCallBacksMatchNull(context)) {
        newtype = __kCFTreeHasNullCallBacks;
    } else if (__CFTreeCallBacksMatchCFType(context)) {
        newtype = __kCFTreeHasCFTypeCallBacks;
    } else {
        newtype = __kCFTreeHasCustomCallBacks;
        __CFAssignWithWriteBarrier((void **)&tree->_callbacks, _CFAllocatorAllocateGC(allocator, sizeof(struct __CFTreeCallBacks), 0));
        if (__CFOASafe) __CFSetLastAllocationEventName(tree->_callbacks, "CFTree (callbacks)");
        tree->_callbacks->retain = context->retain;
        tree->_callbacks->release = context->release;
        tree->_callbacks->copyDescription = context->copyDescription;
        FAULT_CALLBACK((void **)&(tree->_callbacks->retain));
        FAULT_CALLBACK((void **)&(tree->_callbacks->release));
        FAULT_CALLBACK((void **)&(tree->_callbacks->copyDescription));
    }
    __CFBitfieldSetValue(tree->_base._cfinfo[CF_INFO_BITS], 1, 0, newtype);
    newcb = (struct __CFTreeCallBacks *)__CFTreeGetCallBacks(tree);
    if (NULL != newcb->retain) {
        tree->_info = (void *)INVOKE_CALLBACK1(newcb->retain, context->info);
    } else {
        __CFAssignWithWriteBarrier((void **)&tree->_info, context->info);
    }
    if (NULL != oldcb->release) {
        INVOKE_CALLBACK1(oldcb->release, oldinfo);
    }
    if (oldtype == __kCFTreeHasCustomCallBacks) {
        _CFAllocatorDeallocateGC(allocator, oldcb);
    }
}
Example #9
CF_INLINE void __CFMessagePortSetRemote(CFMessagePortRef ms) {
    __CFBitfieldSetValue(((CFRuntimeBase *)ms)->_info, 2, 2, 1);
}
Example #10
CF_INLINE void __CFMessagePortUnsetValid(CFMessagePortRef ms) {
    __CFBitfieldSetValue(((CFRuntimeBase *)ms)->_info, 0, 0, 0);
}
Example #11
CF_INLINE void __CFMessagePortSetIsDeallocing(CFMessagePortRef ms) {
    __CFBitfieldSetValue(((CFRuntimeBase *)ms)->_info, 3, 3, 1);
}
Example #12
CF_INLINE void __CFWindowsMessageQueueUnsetValid(CFWindowsMessageQueueRef wmq) {
    __CFBitfieldSetValue(((CFRuntimeBase *)wmq)->_cfinfo[CF_INFO_BITS], 3, 3, 0);
}
Example #13
CF_INLINE void __CFMessagePortSetExtraMachRef(CFMessagePortRef ms) {
    __CFBitfieldSetValue(((CFRuntimeBase *)ms)->_cfinfo[CF_INFO_BITS], 1, 1, 1);
}
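The corresponding read-side predicates are symmetric one-liners over the same bits; a sketch, assuming the bit layout implied by the setters above (0 = valid, 1 = extra Mach ref, 2 = remote, 3 = deallocing):

CF_INLINE Boolean __CFMessagePortIsRemote(CFMessagePortRef ms) {
    return (Boolean)__CFBitfieldGetValue(((const CFRuntimeBase *)ms)->_cfinfo[CF_INFO_BITS], 2, 2);
}
CF_INLINE Boolean __CFMessagePortIsDeallocing(CFMessagePortRef ms) {
    return (Boolean)__CFBitfieldGetValue(((const CFRuntimeBase *)ms)->_cfinfo[CF_INFO_BITS], 3, 3);
}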
Example #14
static CFSetRef __CFSetInit(CFAllocatorRef allocator, UInt32 flags, CFIndex capacity, const CFSetCallBacks *callBacks) {
    struct __CFSet *memory;
    UInt32 size;
    CFIndex idx;
    __CFBitfieldSetValue(flags, 31, 2, 0);
    if (__CFSetCallBacksMatchNull(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFSetHasNullCallBacks);
    } else if (__CFSetCallBacksMatchCFType(callBacks)) {
	__CFBitfieldSetValue(flags, 3, 2, __kCFSetHasCFTypeCallBacks);
    } else {
	__CFBitfieldSetValue(flags, 3, 2, __kCFSetHasCustomCallBacks);
    }
    size = __CFSetGetSizeOfType(flags) - sizeof(CFRuntimeBase);
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFSetImmutable:
    case __kCFSetFixedMutable:
	size += __CFSetNumBucketsForCapacity(capacity) * sizeof(struct __CFSetBucket);
	break;
    case __kCFSetMutable:
	break;
    }
    memory = (struct __CFSet *)_CFRuntimeCreateInstance(allocator, __kCFSetTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFBitfieldSetValue(memory->_base._info, 6, 0, flags);
    memory->_count = 0;
    memory->_bucketsUsed = 0;
    memory->_emptyMarker = (const void *)0xa1b1c1d3;
    memory->_deletedMarker = (const void *)0xa1b1c1d5;
    memory->_context = NULL;
    switch (__CFBitfieldGetValue(flags, 1, 0)) {
    case __kCFSetImmutable:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (immutable)");
	memory->_capacity = capacity;	/* Don't round up capacity */
	memory->_bucketsNum = __CFSetNumBucketsForCapacity(memory->_capacity);
	memory->_buckets = (struct __CFSetBucket *)((uint8_t *)memory + __CFSetGetSizeOfType(flags));
	for (idx = memory->_bucketsNum; idx--;) {
	    memory->_buckets[idx]._key = memory->_emptyMarker;
	}
	break;
    case __kCFSetFixedMutable:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (mutable-fixed)");
	memory->_capacity = capacity;	/* Don't round up capacity */
	memory->_bucketsNum = __CFSetNumBucketsForCapacity(memory->_capacity);
	memory->_buckets = (struct __CFSetBucket *)((uint8_t *)memory + __CFSetGetSizeOfType(flags));
	for (idx = memory->_bucketsNum; idx--;) {
	    memory->_buckets[idx]._key = memory->_emptyMarker;
	}
	break;
    case __kCFSetMutable:
	if (__CFOASafe) __CFSetLastAllocationEventName(memory, "CFSet (mutable-variable)");
	memory->_capacity = __CFSetRoundUpCapacity(1);
	memory->_bucketsNum = 0;
	memory->_buckets = NULL;
	break;
    }
    if (__kCFSetHasCustomCallBacks == __CFBitfieldGetValue(flags, 3, 2)) {
	const CFSetCallBacks *cb = __CFSetGetCallBacks((CFSetRef)memory);
	*(CFSetCallBacks *)cb = *callBacks;
	FAULT_CALLBACK((void **)&(cb->retain));
	FAULT_CALLBACK((void **)&(cb->release));
	FAULT_CALLBACK((void **)&(cb->copyDescription));
	FAULT_CALLBACK((void **)&(cb->equal));
	FAULT_CALLBACK((void **)&(cb->hash));
    }
    return (CFSetRef)memory;
}
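Example #1's CFSetCreate drives this initializer; a short usage sketch:

#include <CoreFoundation/CoreFoundation.h>
#include <stdio.h>

int main(void) {
    const void *vals[] = { CFSTR("a"), CFSTR("b"), CFSTR("a") };
    CFSetRef set = CFSetCreate(kCFAllocatorDefault, vals, 3, &kCFTypeSetCallBacks);
    printf("count = %ld\n", (long)CFSetGetCount(set));   // 2: duplicates collapse
    CFRelease(set);
    return 0;
}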
Example #15
CF_INLINE void __CFMessagePortUnsetRemote(CFMessagePortRef ms) {
    __CFBitfieldSetValue(((CFRuntimeBase *)ms)->_cfinfo[CF_INFO_BITS], 2, 2, 0);
}
Example #16
CF_INLINE void __CFDataSetInline(CFDataRef data, Boolean flag) {
    __CFBitfieldSetValue(((CFRuntimeBase *)data)->_cfinfo[CF_INFO_BITS], 2, 2, (flag ? 1 : 0));
}
Example #17
CF_INLINE void __CFDataSetInfoBits(CFDataRef data, UInt32 v) {__CFBitfieldSetValue(((CFRuntimeBase *)data)->_cfinfo[CF_INFO_BITS], 5, 0, v);}
Example #18
CF_EXPORT void _CFRelease(CFTypeRef cf) {
    Boolean isAllocator = false;
#if __LP64__
    uint32_t lowBits;
    do {
        lowBits = ((CFRuntimeBase *)cf)->_rc;
        if (0 == lowBits) return;	// Constant CFTypeRef
        if (1 == lowBits) {
            // CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
            CFTypeID typeID = __CFGenericTypeID_inline(cf);
            isAllocator = (__kCFAllocatorTypeID_CONST == typeID);
            CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
            if (cfClass->version & _kCFRuntimeResourcefulObject && cfClass->reclaim != NULL) {
                cfClass->reclaim(cf);
            }
            void (*func)(CFTypeRef) = __CFRuntimeClassTable[typeID]->finalize;
            if (NULL != func) {
                func(cf);
            }
            // We recheck lowBits to see if the object has been retained again during
            // the finalization process.  This allows for the finalizer to resurrect,
            // but the main point is to allow finalizers to be able to manage the
            // removal of objects from uniquing caches, which may race with other threads
            // which are allocating (looking up and finding) objects from those caches,
            // which (that thread) would be the thing doing the extra retain in that case.
            if (isAllocator || _CFAtomicCompareAndSwap32Barrier(1, 0, (int32_t *)&((CFRuntimeBase *)cf)->_rc)) {
                goto really_free;
            }
        }
    } while (!_CFAtomicCompareAndSwap32Barrier(lowBits, lowBits - 1, (int32_t *)&((CFRuntimeBase *)cf)->_rc));
#else
    volatile UInt32 *infoLocation = (UInt32 *)&(((CFRuntimeBase *)cf)->_cfinfo);
    CFIndex rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
    if (__builtin_expect(0 == rcLowBits, 0)) return;        // Constant CFTypeRef
    bool success = 0;
    do {
        UInt32 initialCheckInfo = *infoLocation;
        rcLowBits = __CFBitfieldGetValue(initialCheckInfo, RC_END, RC_START);
        if (__builtin_expect(1 == rcLowBits, 0)) {
            // we think cf should be deallocated
            if (__builtin_expect(__kCFAllocatorTypeID_CONST == __CFGenericTypeID_inline(cf), 0)) {
                if (__builtin_expect(__CFOASafe, 0)) __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, 0, NULL);
                __CFAllocatorDeallocate((void *)cf);
                success = 1;
            } else {
                // CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
                CFTypeID typeID = __CFGenericTypeID_inline(cf);
                CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
                if (cfClass->version & _kCFRuntimeResourcefulObject && cfClass->reclaim != NULL) {
                    cfClass->reclaim(cf);
                }
                if (NULL != __CFRuntimeClassTable[typeID]->finalize) {
                    __CFRuntimeClassTable[__CFGenericTypeID_inline(cf)]->finalize(cf);
                }
                // We recheck rcLowBits to see if the object has been retained again during
                // the finalization process.  This allows for the finalizer to resurrect,
                // but the main point is to allow finalizers to be able to manage the
                // removal of objects from uniquing caches, which may race with other threads
                // which are allocating (looking up and finding) objects from those caches,
                // which (that thread) would be the thing doing the extra retain in that case.
                rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
                success = (1 == rcLowBits);
                if (__builtin_expect(success, 1)) {
                    goto really_free;
                }
            }
        } else {
            // not yet junk
            UInt32 prospectiveNewInfo = initialCheckInfo; // don't want compiler to generate prospectiveNewInfo = *infoLocation.  This is why infoLocation is declared as a pointer to volatile memory.
            if (__builtin_expect((1 << 7) == rcLowBits, 0)) {
                // Time to remove a bit from the external ref count
                __CFSpinLock(&__CFRuntimeExternRefCountTableLock);
                CFIndex rcHighBitsCnt = CFBagGetCountOfValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
                if (1 == rcHighBitsCnt) {
                    __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, (1 << 6) - 1);
                } else {
                    __CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, ((1 << 6) | (1 << 7)) - 1);
                }
                success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
                if (__builtin_expect(success, 1)) {
                    CFBagRemoveValue(__CFRuntimeExternRefCountTable, DISGUISE(cf));
                }
                __CFSpinUnlock(&__CFRuntimeExternRefCountTableLock);
            } else {
                prospectiveNewInfo -= (1 << RC_START);
                success = _CFAtomicCompareAndSwap32Barrier(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
            }
        }
    } while (__builtin_expect(!success, 0));

#endif
    if (__builtin_expect(__CFOASafe, 0)) {
        __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, _CFGetRetainCount(cf), NULL);
    }
    return;

really_free:;
    if (__builtin_expect(__CFOASafe, 0)) {
        // do not use _CFGetRetainCount() because cf has been freed if it was an allocator
        __CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, 0, NULL);
    }
    // cannot zombify allocators, which get deallocated by __CFAllocatorDeallocate (finalize)
    if (!isAllocator) {
        CFAllocatorRef allocator;
        Boolean usesSystemDefaultAllocator;

        if (__CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 7, 7)) {
            allocator = kCFAllocatorSystemDefault;
        } else {
            allocator = CFGetAllocator(cf);
        }
        usesSystemDefaultAllocator = (allocator == kCFAllocatorSystemDefault);

        if (__CFZombieLevel & (1 << 0)) {
            uint8_t *ptr = (uint8_t *)cf - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef));
            size_t size = malloc_size(ptr);
            uint8_t byte = 0xFC;
            if (__CFZombieLevel & (1 << 1)) {
                ptr = (uint8_t *)cf + sizeof(CFRuntimeBase);
                size = size - sizeof(CFRuntimeBase) - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef));
            }
            if (__CFZombieLevel & (1 << 7)) {
                byte = (__CFZombieLevel >> 8) & 0xFF;
            }
            memset(ptr, byte, size);
        }
        // bit 4: scribble only, don't actually deallocate
        if (!(__CFZombieLevel & (1 << 4))) {
            CFAllocatorDeallocate(allocator, (uint8_t *)cf - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef)));
        }
        if (kCFAllocatorSystemDefault != allocator) {
            CFRelease(allocator);
        }
    }
}
Example #19
CF_INLINE void __CFLocaleSetType(CFLocaleRef locale, CFIndex type) {
    __CFBitfieldSetValue(((CFRuntimeBase *)locale)->_cfinfo[CF_INFO_BITS], 1, 0, (uint8_t)type);
}
Example #20
CF_INLINE void __CFSetMutableVariety(void *cf, UInt32 v) {
    __CFBitfieldSetValue(((CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 1, 0, v);
}
Example #21
CF_INLINE void markFinalized(CFTypeRef collection) {
    __CFBitfieldSetValue(((CFRuntimeBase *)collection)->_cfinfo[CF_INFO_BITS], 5, 5, 1);
}
Example #22
CF_INLINE void __CFDataSetNeedsToZero(CFDataRef data, Boolean zero) {
    __CFBitfieldSetValue(((CFRuntimeBase *)data)->_cfinfo[CF_INFO_BITS], 6, 6, (zero ? 1 : 0));
}
Example #23
CF_INLINE void __CFWindowsMessageQueueSetValid(CFWindowsMessageQueueRef wmq) {
    __CFBitfieldSetValue(((CFRuntimeBase *)wmq)->_info, 3, 3, 1);
}