Example No. 1
void CFBinaryHeapAddValue(CFBinaryHeapRef heap, const void *value) {
    CFIndex idx, pidx;
    CFIndex cnt;
    CFAllocatorRef allocator = CFGetAllocator(heap);
    __CFGenericValidateType(heap, __kCFBinaryHeapTypeID);
    switch (__CFBinaryHeapMutableVariety(heap)) {
    case kCFBinaryHeapMutable:
	if (__CFBinaryHeapNumBucketsUsed(heap) == __CFBinaryHeapCapacity(heap))
	    __CFBinaryHeapGrow(heap, 1);
	break;
    }
    cnt = __CFBinaryHeapCount(heap);
    idx = cnt;
    __CFBinaryHeapSetNumBucketsUsed(heap, cnt + 1);
    __CFBinaryHeapSetCount(heap, cnt + 1);
    CFComparisonResult (*compare)(const void *, const void *, void *) = heap->_callbacks.compare;
    pidx = (idx - 1) >> 1;
    while (0 < idx) {
	void *item = heap->_buckets[pidx]._item;
	if ((!compare && item <= value) || (compare && kCFCompareGreaterThan != compare(item, value, heap->_context.info))) break;
	__CFAssignWithWriteBarrier((void **)&heap->_buckets[idx]._item, item);
	idx = pidx;
	pidx = (idx - 1) >> 1;
    }
    if (heap->_callbacks.retain) {
	__CFAssignWithWriteBarrier((void **)&heap->_buckets[idx]._item, (void *)heap->_callbacks.retain(allocator, (void *)value));
    } else {
	__CFAssignWithWriteBarrier((void **)&heap->_buckets[idx]._item, (void *)value);
    }
}
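
The sift-up loop above bubbles the new value toward the root until its parent compares less than or equal to it. A minimal caller sketch using the public API (illustrative values; kCFStringBinaryHeapCallBacks orders CFStrings via CFStringCompare):

#include <CoreFoundation/CoreFoundation.h>

int main(void) {
    // Min-heap of CFStrings; the smallest value (per the compare callback) sits at bucket 0.
    CFBinaryHeapRef heap = CFBinaryHeapCreate(kCFAllocatorDefault, 0,
                                              &kCFStringBinaryHeapCallBacks, NULL);
    CFBinaryHeapAddValue(heap, CFSTR("pear"));
    CFBinaryHeapAddValue(heap, CFSTR("apple"));
    CFBinaryHeapAddValue(heap, CFSTR("mango"));
    CFShow(CFBinaryHeapGetMinimum(heap)); // prints "apple"
    CFRelease(heap);
    return 0;
}

Example No. 2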
__private_extern__ CFArrayRef __CFArrayCreate0(CFAllocatorRef allocator, const void **values, CFIndex numValues, const CFArrayCallBacks *callBacks) {
    CFArrayRef result;
    const CFArrayCallBacks *cb;
    struct __CFArrayBucket *buckets;
    CFAllocatorRef bucketsAllocator;
    void* bucketsBase;
    CFIndex idx;
    CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);
    result = __CFArrayInit(allocator, __kCFArrayImmutable, numValues, callBacks);
    cb = __CFArrayGetCallBacks(result);
    buckets = __CFArrayGetBucketsPtr(result);
    bucketsAllocator = isStrongMemory(result) ? allocator : kCFAllocatorNull;
    bucketsBase = CF_IS_COLLECTABLE_ALLOCATOR(bucketsAllocator) ? (void *)auto_zone_base_pointer(objc_collectableZone(), buckets) : NULL;
    if (NULL != cb->retain) {
        for (idx = 0; idx < numValues; idx++) {
	    __CFAssignWithWriteBarrier((void **)&buckets->_item, (void *)INVOKE_CALLBACK2(cb->retain, allocator, *values));
            values++;
            buckets++;
        }
    }
    else {
        for (idx = 0; idx < numValues; idx++) {
            __CFAssignWithWriteBarrier((void **)&buckets->_item, (void *)*values);
            values++;
            buckets++;
        }
    }
    __CFArraySetCount(result, numValues);
    return result;
}
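
__CFArrayCreate0 is the private creation path for immutable arrays: __CFArrayInit sizes the store, then each value is retained (when the callbacks provide a retain) and written through the write barrier. The same behavior is reachable through the public CFArrayCreate; a minimal sketch with illustrative values:

#include <CoreFoundation/CoreFoundation.h>

int main(void) {
    const void *values[] = { CFSTR("a"), CFSTR("b"), CFSTR("c") };
    // kCFTypeArrayCallBacks retains/releases each element as a CFType.
    CFArrayRef array = CFArrayCreate(kCFAllocatorDefault, values, 3,
                                     &kCFTypeArrayCallBacks);
    CFShow(array);
    CFRelease(array);
    return 0;
}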
Example No. 3
void CFBinaryHeapRemoveMinimumValue(CFBinaryHeapRef heap) {
    void *val;
    CFIndex idx, cidx;
    CFIndex cnt;
    CFAllocatorRef allocator;
    __CFGenericValidateType(heap, __kCFBinaryHeapTypeID);
    cnt = __CFBinaryHeapCount(heap);
    if (0 == cnt) return;
    idx = 0;
    __CFBinaryHeapSetNumBucketsUsed(heap, cnt - 1);
    __CFBinaryHeapSetCount(heap, cnt - 1);
    CFComparisonResult (*compare)(const void *, const void *, void *) = heap->_callbacks.compare;
    allocator = CFGetAllocator(heap);
    if (heap->_callbacks.release)
	heap->_callbacks.release(allocator, heap->_buckets[idx]._item);
    val = heap->_buckets[cnt - 1]._item;
    cidx = (idx << 1) + 1;
    while (cidx < __CFBinaryHeapCount(heap)) {
	void *item = heap->_buckets[cidx]._item;
	if (cidx + 1 < __CFBinaryHeapCount(heap)) {
	    void *item2 = heap->_buckets[cidx + 1]._item;
	    if ((!compare && item > item2) || (compare && kCFCompareGreaterThan == compare(item, item2, heap->_context.info))) {
		cidx++;
		item = item2;
	    }
	}
	if ((!compare && item > val) || (compare && kCFCompareGreaterThan == compare(item, val, heap->_context.info))) break;
	__CFAssignWithWriteBarrier((void **)&heap->_buckets[idx]._item, item);
	idx = cidx;
	cidx = (idx << 1) + 1;
    }
    __CFAssignWithWriteBarrier((void **)&heap->_buckets[idx]._item, val);
}
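
Removal promotes the last bucket's value and sifts it down past the smaller child, so repeated removal visits values in ascending order. A drain-loop sketch, assuming a heap created and populated as in Example No. 1:

const void *min;
while (CFBinaryHeapGetMinimumIfPresent(heap, &min)) {
    CFShow(min);                          // visit the current minimum first...
    CFBinaryHeapRemoveMinimumValue(heap); // ...removal may release it via the callbacks
}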
Example No. 4
void CFTreeSortChildren(CFTreeRef tree, CFComparatorFunction comparator, void *context) {
    CFIndex children;
    __CFGenericValidateType(tree, __kCFTreeTypeID);
    CFAssert1(NULL != comparator, __kCFLogAssertion, "%s(): pointer to comparator function may not be NULL", __PRETTY_FUNCTION__);
    children = CFTreeGetChildCount(tree);
    if (1 < children) {
        CFIndex idx;
        CFTreeRef nextChild;
        struct _tcompareContext ctx;
        CFTreeRef *list, buffer[128];

        list = (children < 128) ? buffer : (CFTreeRef *)CFAllocatorAllocate(kCFAllocatorSystemDefault, children * sizeof(CFTreeRef), 0); // XXX_PCB GC OK
	if (__CFOASafe && list != buffer) __CFSetLastAllocationEventName(list, "CFTree (temp)");
        nextChild = tree->_child;
        for (idx = 0; NULL != nextChild; idx++) {
            list[idx] = nextChild;
            nextChild = nextChild->_sibling;
        }

        ctx.func = comparator;
        ctx.context = context;
        CFQSortArray(list, children, sizeof(CFTreeRef), (CFComparatorFunction)__CFTreeCompareValues, &ctx);

        __CFAssignWithWriteBarrier((void **)&tree->_child, list[0]);
        for (idx = 1; idx < children; idx++) {
            __CFAssignWithWriteBarrier((void **)&list[idx - 1]->_sibling, list[idx]);
        }
        list[idx - 1]->_sibling = NULL;
        tree->_rightmostChild = list[children - 1];
        if (list != buffer) CFAllocatorDeallocate(kCFAllocatorSystemDefault, list); // XXX_PCB GC OK
    }
}
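
CFTreeSortChildren snapshots the sibling chain into a flat list, sorts it, and relinks the chain through write barriers. The comparator receives the child CFTreeRefs themselves (they are unwrapped by the __CFTreeCompareValues thunk). A hedged sketch, assuming each child's info pointer holds a CFString; CompareChildrenByInfo is an illustrative name:

static CFComparisonResult CompareChildrenByInfo(const void *a, const void *b, void *context) {
    CFTreeContext ca = { 0 }, cb = { 0 }; // version must be 0 for CFTreeGetContext
    CFTreeGetContext((CFTreeRef)a, &ca);
    CFTreeGetContext((CFTreeRef)b, &cb);
    return CFStringCompare((CFStringRef)ca.info, (CFStringRef)cb.info, 0);
}

// Usage: CFTreeSortChildren(root, CompareChildrenByInfo, NULL);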
Example No. 5
void CFTreeRemove(CFTreeRef tree) {
    __CFGenericValidateType(tree, __kCFTreeTypeID);
    if (NULL != tree->_parent) {
	if (tree == tree->_parent->_child) {
            __CFAssignWithWriteBarrier((void **)&tree->_parent->_child, tree->_sibling);
            if (tree->_sibling == NULL) {
                tree->_parent->_rightmostChild = NULL;
            }
	} else {
	    CFTreeRef prevSibling = NULL;
	    for (prevSibling = tree->_parent->_child; prevSibling; prevSibling = prevSibling->_sibling) {
		if (prevSibling->_sibling == tree) {
                    __CFAssignWithWriteBarrier((void **)&prevSibling->_sibling, tree->_sibling);
                    if (tree->_parent->_rightmostChild == tree) {
                        __CFAssignWithWriteBarrier((void **)&tree->_parent->_rightmostChild, prevSibling);
                    }
		    break;
		}
	    }
	}
	tree->_parent = NULL;
	tree->_sibling = NULL;
        if (!kCFUseCollectableAllocator) CFRelease(tree);
    }
}
Example No. 6
void CFTreePrependChild(CFTreeRef tree, CFTreeRef newChild) {
    __CFGenericValidateType(tree, __kCFTreeTypeID);
    __CFGenericValidateType(newChild, __kCFTreeTypeID);
    CFAssert1(NULL == newChild->_parent, __kCFLogAssertion, "%s(): must remove newChild from previous parent first", __PRETTY_FUNCTION__);
    CFAssert1(NULL == newChild->_sibling, __kCFLogAssertion, "%s(): must remove newChild from previous parent first", __PRETTY_FUNCTION__);
    if (!kCFUseCollectableAllocator) CFRetain(newChild);
    __CFAssignWithWriteBarrier((void **)&newChild->_parent, tree);
    __CFAssignWithWriteBarrier((void **)&newChild->_sibling, tree->_child);
    if (!tree->_child) {
        __CFAssignWithWriteBarrier((void **)&tree->_rightmostChild, newChild);
    }
    __CFAssignWithWriteBarrier((void **)&tree->_child, newChild);
}
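
CFTreePrependChild links newChild at the head of the sibling chain and, when the tree had no children, also records it as the rightmost child. A build-up sketch; the info strings and callback casts are illustrative:

CFTreeContext ctx = { 0, (void *)CFSTR("root"),
                      (CFTreeRetainCallBack)CFRetain,
                      (CFTreeReleaseCallBack)CFRelease,
                      (CFTreeCopyDescriptionCallBack)CFCopyDescription };
CFTreeRef root = CFTreeCreate(kCFAllocatorDefault, &ctx);
ctx.info = (void *)CFSTR("child");
CFTreeRef child = CFTreeCreate(kCFAllocatorDefault, &ctx);
CFTreePrependChild(root, child); // child is now both first and rightmost child of root
CFRelease(child);                // root holds its own reference (non-GC path retains above)
CFRelease(root);

Example No. 7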
void CFArraySetValueAtIndex(CFMutableArrayRef array, CFIndex idx, const void *value) {
    CF_OBJC_FUNCDISPATCHV(__kCFArrayTypeID, void, (NSMutableArray *)array, setObject:(id)value atIndex:(NSUInteger)idx);
    __CFGenericValidateType(array, __kCFArrayTypeID);
    CFAssert1(__CFArrayGetType(array) != __kCFArrayImmutable, __kCFLogAssertion, "%s(): array is immutable", __PRETTY_FUNCTION__);
    CFAssert2(0 <= idx && idx <= __CFArrayGetCount(array), __kCFLogAssertion, "%s(): index (%d) out of bounds", __PRETTY_FUNCTION__, idx);
    CHECK_FOR_MUTATION(array);
    if (idx == __CFArrayGetCount(array)) {
	_CFArrayReplaceValues(array, CFRangeMake(idx, 0), &value, 1);
    } else {
	BEGIN_MUTATION(array);
	const void *old_value;
	const CFArrayCallBacks *cb = __CFArrayGetCallBacks(array);
	CFAllocatorRef allocator = __CFGetAllocator(array);
	struct __CFArrayBucket *bucket = __CFArrayGetBucketAtIndex(array, idx);
	if (NULL != cb->retain && !hasBeenFinalized(array)) {
	    value = (void *)INVOKE_CALLBACK2(cb->retain, allocator, value);
	}
	old_value = bucket->_item;
	__CFAssignWithWriteBarrier((void **)&bucket->_item, (void *)value); // GC: handles deque/CFStorage cases.
	if (NULL != cb->release && !hasBeenFinalized(array)) {
	    INVOKE_CALLBACK2(cb->release, allocator, old_value);
	}
	array->_mutations++;
        END_MUTATION(array);
    }
}
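
CFArraySetValueAtIndex retains the incoming value before releasing the old one, and an index equal to the count turns into an insert. Usage sketch:

CFMutableArrayRef marr = CFArrayCreateMutable(kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks);
CFArrayAppendValue(marr, CFSTR("a"));
CFArrayAppendValue(marr, CFSTR("b"));
CFArraySetValueAtIndex(marr, 1, CFSTR("B")); // replaces "b": new value retained, old one released
CFArraySetValueAtIndex(marr, 2, CFSTR("c")); // idx == count: appends via _CFArrayReplaceValues
CFRelease(marr);

Example No. 8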
__private_extern__ CFArrayRef __CFArrayCreateCopy0(CFAllocatorRef allocator, CFArrayRef array) {
    CFArrayRef result;
    const CFArrayCallBacks *cb;
    struct __CFArrayBucket *buckets;
    CFAllocatorRef bucketsAllocator;
    void* bucketsBase;
    CFIndex numValues = CFArrayGetCount(array);
    CFIndex idx;
    if (CF_IS_OBJC(__kCFArrayTypeID, array)) {
	cb = &kCFTypeArrayCallBacks;
    } else {
	cb = __CFArrayGetCallBacks(array);
    }
    result = __CFArrayInit(allocator, __kCFArrayImmutable, numValues, cb);
    cb = __CFArrayGetCallBacks(result); // GC: use the new array's callbacks so we don't leak.
    buckets = __CFArrayGetBucketsPtr(result);
    bucketsAllocator = isStrongMemory(result) ? allocator : kCFAllocatorNull;
    bucketsBase = CF_IS_COLLECTABLE_ALLOCATOR(bucketsAllocator) ? (void *)auto_zone_base_pointer(objc_collectableZone(), buckets) : NULL;
    for (idx = 0; idx < numValues; idx++) {
	const void *value = CFArrayGetValueAtIndex(array, idx);
	if (NULL != cb->retain) {
	    value = (void *)INVOKE_CALLBACK2(cb->retain, allocator, value);
	}
	__CFAssignWithWriteBarrier((void **)&buckets->_item, (void *)value);
	buckets++;
    }
    __CFArraySetCount(result, numValues);
    return result;
}
Example No. 9
/* Allocates a new block of data with at least numNewValues more bytes than the current length. If clear is true, the new bytes up to at least the new length will be zeroed. */
static void __CFDataGrow(CFMutableDataRef data, CFIndex numNewValues, Boolean clear) {
    CFIndex oldLength = __CFDataLength(data);
    CFIndex newLength = oldLength + numNewValues;
    if (newLength > CFDATA_MAX_SIZE || newLength < 0) __CFDataHandleOutOfMemory(data, newLength * sizeof(uint8_t));
    CFIndex capacity = __CFDataRoundUpCapacity(newLength);
    CFIndex numBytes = __CFDataNumBytesForCapacity(capacity);
    CFAllocatorRef allocator = CFGetAllocator(data);
    void *bytes = NULL;
    void *oldBytes = data->_bytes;
    Boolean allocateCleared = clear && __CFDataShouldAllocateCleared(data, numBytes);
    if (allocateCleared && !__CFDataUseAllocator(data) && (oldLength == 0 || (newLength / oldLength) > 4)) {
	// If the length that needs to be zeroed is significantly greater than the length of the data, then calloc/memmove is probably more efficient than realloc/memset.
	bytes = __CFDataAllocate(data, numBytes * sizeof(uint8_t), true);
	if (NULL != bytes) {
	    memmove(bytes, oldBytes, oldLength);
	    __CFDataDeallocate(data);
	}
    }
    if (bytes == NULL) {
	// If the calloc/memmove approach either failed or was never attempted, then realloc.
	allocateCleared = false;
	if (__CFDataUseAllocator(data)) {
	    bytes = CFAllocatorReallocate(allocator, oldBytes, numBytes * sizeof(uint8_t), 0);
	} else {
	    bytes = realloc(oldBytes, numBytes * sizeof(uint8_t));
	}
    }
    if (NULL == bytes) __CFDataHandleOutOfMemory(data, numBytes * sizeof(uint8_t));
    __CFDataSetCapacity(data, capacity);
    __CFDataSetNumBytes(data, numBytes);
    if (clear && !allocateCleared && oldLength < newLength) memset((uint8_t *)bytes + oldLength, 0, newLength - oldLength);
    __CFDataSetNeedsToZero(data, !allocateCleared);
    __CFAssignWithWriteBarrier((void **)&data->_bytes, bytes);
    if (__CFOASafe) __CFSetLastAllocationEventName(data->_bytes, "CFData (store)");
}
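
__CFDataGrow prefers calloc plus memmove when a large cleared region is needed, and otherwise reallocates in place; either way the capacity is rounded up so repeated appends amortize. It is never called directly; growth is driven through the public mutable-data API, for example:

CFMutableDataRef d = CFDataCreateMutable(kCFAllocatorDefault, 0); // capacity 0: growable without bound
const uint8_t chunk[1024] = { 0 };
for (int i = 0; i < 64; i++) {
    CFDataAppendBytes(d, chunk, sizeof(chunk)); // grows the backing store as needed
}
CFRelease(d);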
Example No. 10
void CFTreeInsertSibling(CFTreeRef tree, CFTreeRef newSibling) {
    CFAllocatorRef allocator;
    __CFGenericValidateType(tree, __kCFTreeTypeID);
    __CFGenericValidateType(newSibling, __kCFTreeTypeID);
    CFAssert1(NULL != tree->_parent, __kCFLogAssertion, "%s(): tree must have a parent", __PRETTY_FUNCTION__);
    CFAssert1(NULL == newSibling->_parent, __kCFLogAssertion, "%s(): must remove newSibling from previous parent first", __PRETTY_FUNCTION__);
    CFAssert1(NULL == newSibling->_sibling, __kCFLogAssertion, "%s(): must remove newSibling from previous parent first", __PRETTY_FUNCTION__);
    if (!kCFUseCollectableAllocator) CFRetain(newSibling);
    allocator = CFGetAllocator(tree);
    __CFAssignWithWriteBarrier((void **)&newSibling->_parent, tree->_parent);
    __CFAssignWithWriteBarrier((void **)&newSibling->_sibling, tree->_sibling);
    __CFAssignWithWriteBarrier((void **)&tree->_sibling, newSibling);
    if (tree->_parent) {
        if (tree->_parent->_rightmostChild == tree) {
            __CFAssignWithWriteBarrier((void **)&tree->_parent->_rightmostChild, newSibling);
        }
    }
}
Example No. 11
// NB: AddressBook on the Phone is a fragile flower, so this function cannot do anything
// that causes the values to be retained or released.
void CFArrayExchangeValuesAtIndices(CFMutableArrayRef array, CFIndex idx1, CFIndex idx2) {
    const void *tmp;
    struct __CFArrayBucket *bucket1, *bucket2;
    CF_OBJC_FUNCDISPATCHV(__kCFArrayTypeID, void, (NSMutableArray *)array, exchangeObjectAtIndex:(NSUInteger)idx1 withObjectAtIndex:(NSUInteger)idx2);
    __CFGenericValidateType(array, __kCFArrayTypeID);
    CFAssert2(0 <= idx1 && idx1 < __CFArrayGetCount(array), __kCFLogAssertion, "%s(): index #1 (%d) out of bounds", __PRETTY_FUNCTION__, idx1);
    CFAssert2(0 <= idx2 && idx2 < __CFArrayGetCount(array), __kCFLogAssertion, "%s(): index #2 (%d) out of bounds", __PRETTY_FUNCTION__, idx2);
    CFAssert1(__CFArrayGetType(array) != __kCFArrayImmutable, __kCFLogAssertion, "%s(): array is immutable", __PRETTY_FUNCTION__);
    CHECK_FOR_MUTATION(array);
    BEGIN_MUTATION(array);
    bucket1 = __CFArrayGetBucketAtIndex(array, idx1);
    bucket2 = __CFArrayGetBucketAtIndex(array, idx2);
    tmp = bucket1->_item;
    // XXX these aren't needed.
    __CFAssignWithWriteBarrier((void **)&bucket1->_item, (void *)bucket2->_item);
    __CFAssignWithWriteBarrier((void **)&bucket2->_item, (void *)tmp);
    array->_mutations++;
    END_MUTATION(array);
}
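
Per the comment above, the exchange is two plain pointer stores (through write barriers) with no retain/release traffic. Usage sketch:

CFMutableArrayRef marr = CFArrayCreateMutable(kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks);
CFArrayAppendValue(marr, CFSTR("first"));
CFArrayAppendValue(marr, CFSTR("second"));
CFArrayExchangeValuesAtIndices(marr, 0, 1);
CFShow(CFArrayGetValueAtIndex(marr, 0)); // prints "second"
CFRelease(marr);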
Example No. 12
static void __CFBinaryHeapGrow(CFBinaryHeapRef heap, CFIndex numNewValues) {
    CFIndex oldCount = __CFBinaryHeapCount(heap);
    CFIndex capacity = __CFBinaryHeapRoundUpCapacity(oldCount + numNewValues);
    CFAllocatorRef allocator = CFGetAllocator(heap);
    __CFBinaryHeapSetCapacity(heap, capacity);
    __CFBinaryHeapSetNumBuckets(heap, __CFBinaryHeapNumBucketsForCapacity(capacity));
    void *buckets = _CFAllocatorReallocateGC(allocator, heap->_buckets, __CFBinaryHeapNumBuckets(heap) * sizeof(struct __CFBinaryHeapBucket), isStrongMemory_Heap(heap) ? __kCFAllocatorGCScannedMemory : 0);
    __CFAssignWithWriteBarrier((void **)&heap->_buckets, buckets);
    if (__CFOASafe) __CFSetLastAllocationEventName(heap->_buckets, "CFBinaryHeap (store)");
    if (NULL == heap->_buckets) HALT;
}
Example No. 13
static CFBinaryHeapRef __CFBinaryHeapInit(CFAllocatorRef allocator, UInt32 flags, CFIndex capacity, const void **values, CFIndex numValues, const CFBinaryHeapCallBacks *callBacks, const CFBinaryHeapCompareContext *compareContext) {
    CFBinaryHeapRef memory;
    CFIndex idx;
    CFIndex size;

    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert2(0 <= numValues, __kCFLogAssertion, "%s(): numValues (%d) cannot be less than zero", __PRETTY_FUNCTION__, numValues);
    size = sizeof(struct __CFBinaryHeap) - sizeof(CFRuntimeBase);
    if (CF_IS_COLLECTABLE_ALLOCATOR(allocator)) {
	if (!callBacks || (callBacks->retain == NULL && callBacks->release == NULL)) {
	    __CFBitfieldSetValue(flags, 4, 4, 1); // setWeak
	}
    }

    memory = (CFBinaryHeapRef)_CFRuntimeCreateInstance(allocator, __kCFBinaryHeapTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFBinaryHeapSetCapacity(memory, __CFBinaryHeapRoundUpCapacity(1));
    __CFBinaryHeapSetNumBuckets(memory, __CFBinaryHeapNumBucketsForCapacity(__CFBinaryHeapRoundUpCapacity(1)));
    void *buckets = _CFAllocatorAllocateGC(allocator, __CFBinaryHeapNumBuckets(memory) * sizeof(struct __CFBinaryHeapBucket), isStrongMemory_Heap(memory) ? __kCFAllocatorGCScannedMemory : 0);
    __CFAssignWithWriteBarrier((void **)&memory->_buckets, buckets);
    if (__CFOASafe) __CFSetLastAllocationEventName(memory->_buckets, "CFBinaryHeap (store)");
    if (NULL == memory->_buckets) {
        CFRelease(memory);
        return NULL;
    }
    __CFBinaryHeapSetNumBucketsUsed(memory, 0);
    __CFBinaryHeapSetCount(memory, 0);
    if (NULL != callBacks) {
	memory->_callbacks.retain = callBacks->retain;
	memory->_callbacks.release = callBacks->release;
	memory->_callbacks.copyDescription = callBacks->copyDescription;
	memory->_callbacks.compare = callBacks->compare;
    } else {
	memory->_callbacks.retain = 0;
	memory->_callbacks.release = 0;
	memory->_callbacks.copyDescription = 0;
	memory->_callbacks.compare = 0;
    }
    if (compareContext) memcpy(&memory->_context, compareContext, sizeof(CFBinaryHeapCompareContext));
    // CF: retain info for proper operation
    __CFBinaryHeapSetMutableVariety(memory, kCFBinaryHeapMutable);
    for (idx = 0; idx < numValues; idx++) {
	CFBinaryHeapAddValue(memory, values[idx]);
    }
    __CFBinaryHeapSetMutableVariety(memory, __CFBinaryHeapMutableVarietyFromFlags(flags));
    return memory;
}
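
__CFBinaryHeapInit copies the caller's callback table into the instance and pushes any initial values through CFBinaryHeapAddValue while the heap is temporarily marked mutable. A hedged sketch of a custom table; IntCompare and kIntHeapCallBacks are illustrative names, not CF API:

#include <stdint.h>

// Store plain integers cast to pointers: no retain/release needed.
static CFComparisonResult IntCompare(const void *a, const void *b, void *info) {
    intptr_t x = (intptr_t)a, y = (intptr_t)b;
    return (x < y) ? kCFCompareLessThan
         : (x > y) ? kCFCompareGreaterThan
                   : kCFCompareEqualTo;
}

// Field order: version, retain, release, copyDescription, compare.
static const CFBinaryHeapCallBacks kIntHeapCallBacks = { 0, NULL, NULL, NULL, IntCompare };

// Usage: CFBinaryHeapRef h = CFBinaryHeapCreate(kCFAllocatorDefault, 0, &kIntHeapCallBacks, NULL);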
Example No. 14
void CFTreeSetContext(CFTreeRef tree, const CFTreeContext *context) {
    uint32_t newtype, oldtype = __CFTreeGetCallBacksType(tree);
    struct __CFTreeCallBacks *oldcb = (struct __CFTreeCallBacks *)__CFTreeGetCallBacks(tree);
    struct __CFTreeCallBacks *newcb;
    void *oldinfo = tree->_info;
    CFAllocatorRef allocator = CFGetAllocator(tree);
    
    if (__CFTreeCallBacksMatchNull(context)) {
        newtype = __kCFTreeHasNullCallBacks;
    } else if (__CFTreeCallBacksMatchCFType(context)) {
        newtype = __kCFTreeHasCFTypeCallBacks;
    } else {
        newtype = __kCFTreeHasCustomCallBacks;
        __CFAssignWithWriteBarrier((void **)&tree->_callbacks, _CFAllocatorAllocateGC(allocator, sizeof(struct __CFTreeCallBacks), 0));
        if (__CFOASafe) __CFSetLastAllocationEventName(tree->_callbacks, "CFTree (callbacks)");
        tree->_callbacks->retain = context->retain;
        tree->_callbacks->release = context->release;
        tree->_callbacks->copyDescription = context->copyDescription;
        FAULT_CALLBACK((void **)&(tree->_callbacks->retain));
        FAULT_CALLBACK((void **)&(tree->_callbacks->release));
        FAULT_CALLBACK((void **)&(tree->_callbacks->copyDescription));
    }
    __CFBitfieldSetValue(tree->_base._cfinfo[CF_INFO_BITS], 1, 0, newtype);
    newcb = (struct __CFTreeCallBacks *)__CFTreeGetCallBacks(tree);
    if (NULL != newcb->retain) {
        tree->_info = (void *)INVOKE_CALLBACK1(newcb->retain, context->info);
    } else {
        __CFAssignWithWriteBarrier((void **)&tree->_info, context->info);
    }
    if (NULL != oldcb->release) {
        INVOKE_CALLBACK1(oldcb->release, oldinfo);
    }
    if (oldtype == __kCFTreeHasCustomCallBacks) {
        _CFAllocatorDeallocateGC(allocator, oldcb);
    }
}
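
CFTreeSetContext retains the new info through the new callbacks before releasing the old info through the old ones, so the swap is safe even when both contexts share the same info pointer. A hedged sketch, assuming tree already exists; the callback casts are illustrative:

CFTreeContext ctx = { 0, (void *)CFSTR("new-info"),
                      (CFTreeRetainCallBack)CFRetain,
                      (CFTreeReleaseCallBack)CFRelease,
                      (CFTreeCopyDescriptionCallBack)CFCopyDescription };
CFTreeSetContext(tree, &ctx); // old info is released via the old callbacks

Example No. 15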
// This function is for Foundation's benefit; no one else should use it.
void _CFArraySetCapacity(CFMutableArrayRef array, CFIndex cap) {
    if (CF_IS_OBJC(__kCFArrayTypeID, array)) return;
    __CFGenericValidateType(array, __kCFArrayTypeID);
    CFAssert1(__CFArrayGetType(array) != __kCFArrayImmutable, __kCFLogAssertion, "%s(): array is immutable", __PRETTY_FUNCTION__);
    CFAssert3(__CFArrayGetCount(array) <= cap, __kCFLogAssertion, "%s(): desired capacity (%d) is less than count (%d)", __PRETTY_FUNCTION__, cap, __CFArrayGetCount(array));
    CHECK_FOR_MUTATION(array);
    BEGIN_MUTATION(array);
    // Currently, attempting to set the capacity of an array which is the CFStorage
    // variant, or set the capacity larger than __CF_MAX_BUCKETS_PER_DEQUE, has no
    // effect.  The primary purpose of this API is to help avoid a bunch of the
    // resizes at the small capacities 4, 8, 16, etc.
    if (__CFArrayGetType(array) == __kCFArrayDeque) {
	struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
	CFIndex capacity = __CFArrayDequeRoundUpCapacity(cap);
	CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket);
	CFAllocatorRef allocator = __CFGetAllocator(array);
        allocator = _CFConvertAllocatorToGCRefZeroEquivalent(allocator);
	Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
	if (NULL == deque) {
	    deque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
	    if (NULL == deque) __CFArrayHandleOutOfMemory(array, size);
	    if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
	    deque->_leftIdx = capacity / 2; 
	} else {
	    struct __CFArrayDeque *olddeque = deque;
	    CFIndex oldcap = deque->_capacity;
	    deque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
	    if (NULL == deque) __CFArrayHandleOutOfMemory(array, size);
	    objc_memmove_collectable(deque, olddeque, sizeof(struct __CFArrayDeque) + oldcap * sizeof(struct __CFArrayBucket));
	    if (!collectableMemory) CFAllocatorDeallocate(allocator, olddeque);
	    if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
	}
	deque->_capacity = capacity;
	__CFAssignWithWriteBarrier((void **)&array->_store, (void *)deque);
    }
    END_MUTATION(array);
}
Example No. 16
// may move deque storage, since it may need to grow the deque
static void __CFArrayRepositionDequeRegions(CFMutableArrayRef array, CFRange range, CFIndex newCount) {
    // newCount elements are going to replace the range, and the result will fit in the deque
    struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
    struct __CFArrayBucket *buckets;
    CFIndex cnt, futureCnt, numNewElems;
    CFIndex L, A, B, C, R;

    buckets = (struct __CFArrayBucket *)((uint8_t *)deque + sizeof(struct __CFArrayDeque));
    cnt = __CFArrayGetCount(array);
    futureCnt = cnt - range.length + newCount;

    L = deque->_leftIdx;		// length of region to left of deque
    A = range.location;			// length of region in deque to left of replaced range
    B = range.length;			// length of replaced range
    C = cnt - B - A;			// length of region in deque to right of replaced range
    R = deque->_capacity - cnt - L;	// length of region to right of deque
    numNewElems = newCount - B;

    CFIndex wiggle = deque->_capacity >> 17;
    if (wiggle < 4) wiggle = 4;
    if (deque->_capacity < (uint32_t)futureCnt || (cnt < futureCnt && L + R < wiggle)) {
	// must be inserting or space is tight, reallocate and re-center everything
	CFIndex capacity = __CFArrayDequeRoundUpCapacity(futureCnt + wiggle);
	CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket);
	CFAllocatorRef allocator = __CFGetAllocator(array);
        allocator = _CFConvertAllocatorToGCRefZeroEquivalent(allocator);
        Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
	struct __CFArrayDeque *newDeque = (struct __CFArrayDeque *)CFAllocatorAllocate(allocator, size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
	if (__CFOASafe) __CFSetLastAllocationEventName(newDeque, "CFArray (store-deque)");
	struct __CFArrayBucket *newBuckets = (struct __CFArrayBucket *)((uint8_t *)newDeque + sizeof(struct __CFArrayDeque));
	CFIndex oldL = L;
	CFIndex newL = (capacity - futureCnt) / 2;
	CFIndex oldC0 = oldL + A + B;
	CFIndex newC0 = newL + A + newCount;
	newDeque->_leftIdx = newL;
	newDeque->_capacity = capacity;
	if (0 < A) objc_memmove_collectable(newBuckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket));
	if (0 < C) objc_memmove_collectable(newBuckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket));
	__CFAssignWithWriteBarrier((void **)&array->_store, (void *)newDeque);
        if (!collectableMemory && deque) CFAllocatorDeallocate(allocator, deque);
//printf("3:  array %p store is now %p (%lx)\n", array, array->_store, *(unsigned long *)(array->_store));
	return;
    }

    if ((numNewElems < 0 && C < A) || (numNewElems <= R && C < A)) {	// move C
	// deleting: C is smaller
	// inserting: C is smaller and R has room
	CFIndex oldC0 = L + A + B;
	CFIndex newC0 = L + A + newCount;
	if (0 < C) objc_memmove_collectable(buckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket));
	// GrP GC: zero-out newly exposed space on the right, if any
	if (oldC0 > newC0) memset(buckets + newC0 + C, 0, (oldC0 - newC0) * sizeof(struct __CFArrayBucket));
    } else if ((numNewElems < 0) || (numNewElems <= L && A <= C)) {	// move A
	// deleting: A is smaller or equal (covers remaining delete cases)
	// inserting: A is smaller and L has room
	CFIndex oldL = L;
	CFIndex newL = L - numNewElems;
	deque->_leftIdx = newL;
	if (0 < A) objc_memmove_collectable(buckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket));
	// GrP GC: zero-out newly exposed space on the left, if any
	if (newL > oldL) memset(buckets + oldL, 0, (newL - oldL) * sizeof(struct __CFArrayBucket));
    } else {
	// now, must be inserting, and either:
	//    A<=C, but L doesn't have room (R might have, but don't care)
	//    C<A, but R doesn't have room (L might have, but don't care)
	// re-center everything
	CFIndex oldL = L;
	CFIndex newL = (L + R - numNewElems) / 2;
	newL = newL - newL / 2;
	CFIndex oldC0 = oldL + A + B;
	CFIndex newC0 = newL + A + newCount;
	deque->_leftIdx = newL;
	if (newL < oldL) {
	    if (0 < A) objc_memmove_collectable(buckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket));
	    if (0 < C) objc_memmove_collectable(buckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket));
	    // GrP GC: zero-out newly exposed space on the right, if any
	    if (oldC0 > newC0) memset(buckets + newC0 + C, 0, (oldC0 - newC0) * sizeof(struct __CFArrayBucket));
	} else {
	    if (0 < C) objc_memmove_collectable(buckets + newC0, buckets + oldC0, C * sizeof(struct __CFArrayBucket));
	    if (0 < A) objc_memmove_collectable(buckets + newL, buckets + oldL, A * sizeof(struct __CFArrayBucket));
	    // GrP GC: zero-out newly exposed space on the left, if any
	    if (newL > oldL) memset(buckets + oldL, 0, (newL - oldL) * sizeof(struct __CFArrayBucket));
	}
    }
}
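
A worked instance of the region arithmetic above, with illustrative numbers: take _capacity = 16, _leftIdx = 5, cnt = 8, and replace range {2, 3} with newCount = 5. Then L = 5, A = 2, B = 3, C = 3, R = 3, numNewElems = 2, and futureCnt = 10. The deque still fits (16 >= 10) and L + R = 8 >= wiggle = 4, so no reallocation happens. The first branch fails because C < A is false; the second succeeds because numNewElems <= L and A <= C, so region A slides left: _leftIdx drops to 3, the two A buckets move from index 5 to index 3, and region C stays put at index 10 (oldC0 = newC0 = 10), leaving exactly five slots open for the new region B.

Example No. 17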
// A NULL bytesDeallocator passed to this function does not mean the default allocator; it
// means there should be no deallocator, and the bytes should be copied.
static CFMutableDataRef __CFDataInit(CFAllocatorRef allocator, CFOptionFlags flags, CFIndex capacity, const uint8_t *bytes, CFIndex length, CFAllocatorRef bytesDeallocator) {
    CFMutableDataRef memory;
    __CFGenericValidateMutabilityFlags(flags);
    CFAssert2(0 <= capacity, __kCFLogAssertion, "%s(): capacity (%d) cannot be less than zero", __PRETTY_FUNCTION__, capacity);
    CFAssert3(kCFFixedMutable != __CFMutableVarietyFromFlags(flags) || length <= capacity, __kCFLogAssertion, "%s(): for kCFFixedMutable type, capacity (%d) must be greater than or equal to number of initial elements (%d)", __PRETTY_FUNCTION__, capacity, length);
    CFAssert2(0 <= length, __kCFLogAssertion, "%s(): length (%d) cannot be less than zero", __PRETTY_FUNCTION__, length);

    Boolean collectableMemory = CF_IS_COLLECTABLE_ALLOCATOR(allocator);
    Boolean noCopy = bytesDeallocator != NULL;
    Boolean isMutable = ((flags & __kCFMutable) != 0);
    Boolean isGrowable = ((flags & __kCFGrowable) != 0);
    Boolean allocateInline = !isGrowable && !noCopy && capacity < INLINE_BYTES_THRESHOLD;
    allocator = (allocator == NULL) ? __CFGetDefaultAllocator() : allocator;
    Boolean useAllocator = (allocator != kCFAllocatorSystemDefault && allocator != kCFAllocatorMalloc && allocator != kCFAllocatorMallocZone);
    
    CFIndex size = sizeof(struct __CFData) - sizeof(CFRuntimeBase);
    if (allocateInline) {
	size += sizeof(uint8_t) * __CFDataNumBytesForCapacity(capacity) + sizeof(uint8_t) * 15;	// for 16-byte alignment fixup
    }
    memory = (CFMutableDataRef)_CFRuntimeCreateInstance(allocator, __kCFDataTypeID, size, NULL);
    if (NULL == memory) {
	return NULL;
    }
    __CFDataSetNumBytesUsed(memory, 0);
    __CFDataSetLength(memory, 0);
    __CFDataSetInfoBits(memory,
			(allocateInline ? __kCFBytesInline : 0) | 
			(useAllocator ? __kCFUseAllocator : 0) |
			(collectableMemory ? __kCFAllocatesCollectable : 0));
    
    BOOL finalize = YES;
    BOOL scan = YES;
    if (collectableMemory) {
	if (allocateInline) {
	    // We have no pointer to anything that needs to be reclaimed, so don't scan or finalize.
	    scan = NO;
	    finalize = NO;
	} else if (noCopy) {
	    if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator)) {
		// We're taking responsibility for externally GC-allocated memory, so scan us, but we don't need to finalize.
		finalize = NO;
	    } else if (bytesDeallocator == kCFAllocatorNull) {
		// We don't have responsibility for these bytes, so there's no need to be scanned and we don't need to finalize.
		scan = NO;
		finalize = NO;
	    } else {
		// We have a pointer to non-GC-allocated memory, so don't scan, but do finalize.
		scan = NO;
	    }
	}
	if (!scan) auto_zone_set_unscanned(objc_collectableZone(), memory);
	if (!finalize) auto_zone_set_nofinalize(objc_collectableZone(), memory);
    }
    if (isMutable && isGrowable) {
	__CFDataSetCapacity(memory, __CFDataRoundUpCapacity(1));
	__CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(__CFDataRoundUpCapacity(1)));
	__CFSetMutableVariety(memory, kCFMutable);
    } else {
	/* Don't round up capacity */
	__CFDataSetCapacity(memory, capacity);
	__CFDataSetNumBytes(memory, __CFDataNumBytesForCapacity(capacity));
	__CFSetMutableVariety(memory, kCFFixedMutable);
    }
    if (noCopy) {
	__CFAssignWithWriteBarrier((void **)&memory->_bytes, (uint8_t *)bytes);
	if (finalize) {
            if (_CFAllocatorIsGCRefZero(bytesDeallocator)) {
	        memory->_bytesDeallocator = bytesDeallocator;
            } else {
	        memory->_bytesDeallocator = (CFAllocatorRef)CFRetain(_CFConvertAllocatorToNonGCRefZeroEquivalent(bytesDeallocator));
            }
	}
	if (CF_IS_COLLECTABLE_ALLOCATOR(bytesDeallocator) && !_CFAllocatorIsGCRefZero(bytesDeallocator)) {
	    // When given a GC allocator which is not one of the GCRefZero ones as the deallocator, we assume that the no-copy memory is GC-allocated with a retain count of (at least) 1 and we should release it now instead of waiting until __CFDataDeallocate.
	    auto_zone_release(objc_collectableZone(), memory->_bytes);
	}
	__CFDataSetNumBytesUsed(memory, length);
	__CFDataSetLength(memory, length);
	// Mutable no-copy datas are not allowed, so don't bother setting needsToZero flag.
    } else {
	Boolean cleared = (isMutable && !isGrowable && !_CFExecutableLinkedOnOrAfter(CFSystemVersionSnowLeopard));
	if (!allocateInline) {
	    // assume that allocators give 16-byte aligned memory back -- it is their responsibility
	    __CFAssignWithWriteBarrier((void **)&memory->_bytes, __CFDataAllocate(memory, __CFDataNumBytes(memory) * sizeof(uint8_t), cleared));
	    if (__CFOASafe) __CFSetLastAllocationEventName(memory->_bytes, "CFData (store)");
	    if (NULL == memory->_bytes) {
		CFRelease(memory);
		return NULL;
	    }
	} else {
	    if (length == 0 && !isMutable) {
                // NSData sets its bytes pointer to NULL when its length is zero. Starting in 10.7 we do the same for CFData.
                memory->_bytes = NULL;
                // It is important to set this data as not inlined, so we do not recalculate a bytes pointer from null.
                __CFDataSetInline(memory, false);
	    }
	    cleared = true;
	}
	__CFDataSetNeedsToZero(memory, !cleared);
	memory->_bytesDeallocator = NULL;
	CFDataReplaceBytes(memory, CFRangeMake(0, 0), bytes, length);
    }
    __CFSetMutableVariety(memory, __CFMutableVarietyFromFlags(flags));
    return memory;
}
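
The noCopy path above simply aliases the caller's buffer. A no-copy sketch over a static buffer; passing kCFAllocatorNull as the bytesDeallocator means CFData never frees the bytes:

static const uint8_t kBlob[] = { 0xDE, 0xAD, 0xBE, 0xEF };
CFDataRef d = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, kBlob,
                                          sizeof(kBlob), kCFAllocatorNull);
// Contrast: CFDataCreate(kCFAllocatorDefault, kBlob, sizeof(kBlob)) would copy the bytes.
CFRelease(d);

Example No. 18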
// This function does no ObjC dispatch or argument checking;
// it should only be called from places where that dispatch and checking have already been done, or from NSCFArray.
void _CFArrayReplaceValues(CFMutableArrayRef array, CFRange range, const void **newValues, CFIndex newCount) {
    CHECK_FOR_MUTATION(array);
    BEGIN_MUTATION(array);
    const CFArrayCallBacks *cb;
    CFIndex idx, cnt, futureCnt;
    const void **newv, *buffer[256];
    cnt = __CFArrayGetCount(array);
    futureCnt = cnt - range.length + newCount;
    CFAssert1(newCount <= futureCnt, __kCFLogAssertion, "%s(): internal error 1", __PRETTY_FUNCTION__);
    cb = __CFArrayGetCallBacks(array);
    CFAllocatorRef allocator = __CFGetAllocator(array);

    /* Retain new values if needed, possibly allocating a temporary buffer for them */
    if (NULL != cb->retain && !hasBeenFinalized(array)) {
	newv = (newCount <= 256) ? (const void **)buffer : (const void **)CFAllocatorAllocate(kCFAllocatorSystemDefault, newCount * sizeof(void *), 0); // GC OK
	if (newv != buffer && __CFOASafe) __CFSetLastAllocationEventName(newv, "CFArray (temp)");
	for (idx = 0; idx < newCount; idx++) {
	    newv[idx] = (void *)INVOKE_CALLBACK2(cb->retain, allocator, (void *)newValues[idx]);
	}
    } else {
	newv = newValues;
    }
    array->_mutations++;

    /* Now, there are three regions of interest, each of which may be empty:
     *   A: the region from index 0 to one less than the range.location
     *   B: the region of the range
     *   C: the region from range.location + range.length to the end
     * Note that index 0 is not necessarily at the lowest-address edge
     * of the available storage. The values in region B need to get
     * released, and the values in regions A and C (depending) need
     * to get shifted if the number of new values is different from
     * the length of the range being replaced.
     */
    if (0 < range.length) {
	__CFArrayReleaseValues(array, range, false);
    }
    // region B elements are now "dead"
    if (0) {
    } else if (NULL == array->_store) {
	if (0) {
	} else if (0 <= futureCnt) {
	    struct __CFArrayDeque *deque;
	    CFIndex capacity = __CFArrayDequeRoundUpCapacity(futureCnt);
	    CFIndex size = sizeof(struct __CFArrayDeque) + capacity * sizeof(struct __CFArrayBucket);
	    deque = (struct __CFArrayDeque *)CFAllocatorAllocate(_CFConvertAllocatorToGCRefZeroEquivalent(allocator), size, isStrongMemory(array) ? __kCFAllocatorGCScannedMemory : 0);
	    if (__CFOASafe) __CFSetLastAllocationEventName(deque, "CFArray (store-deque)");
	    deque->_leftIdx = (capacity - newCount) / 2;
	    deque->_capacity = capacity;
	    __CFAssignWithWriteBarrier((void **)&array->_store, (void *)deque);
	}
    } else {		// Deque
	// reposition regions A and C for new region B elements in gap
	if (0) {
	} else if (range.length != newCount) {
	    __CFArrayRepositionDequeRegions(array, range, newCount);
	}
    }
    // copy in new region B elements
    if (0 < newCount) {
	if (0) {
	} else {	// Deque
	    struct __CFArrayDeque *deque = (struct __CFArrayDeque *)array->_store;
	    struct __CFArrayBucket *raw_buckets = (struct __CFArrayBucket *)((uint8_t *)deque + sizeof(struct __CFArrayDeque));
	    objc_memmove_collectable(raw_buckets + deque->_leftIdx + range.location, newv, newCount * sizeof(struct __CFArrayBucket));
	}
    }
    __CFArraySetCount(array, futureCnt);
    if (newv != buffer && newv != newValues) CFAllocatorDeallocate(kCFAllocatorSystemDefault, newv);
    END_MUTATION(array);
}
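
_CFArrayReplaceValues is the engine beneath the public CFArrayReplaceValues, which performs the ObjC dispatch and range checks first. A splice sketch via the public wrapper:

const void *newVals[] = { CFSTR("x"), CFSTR("y") };
CFMutableArrayRef marr = CFArrayCreateMutable(kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks);
CFArrayAppendValue(marr, CFSTR("a"));
CFArrayAppendValue(marr, CFSTR("b"));
CFArrayReplaceValues(marr, CFRangeMake(1, 1), newVals, 2); // marr is now: a, x, y
CFRelease(marr);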
Example No. 19
Boolean CFBinaryHeapGetMinimumIfPresent(CFBinaryHeapRef heap, const void **value) {
    __CFGenericValidateType(heap, __kCFBinaryHeapTypeID);
    if (0 == __CFBinaryHeapCount(heap)) return false;
    if (NULL != value) __CFAssignWithWriteBarrier((void **)value, heap->_buckets[0]._item);
    return true;
}
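
Unlike CFBinaryHeapGetMinimum, this variant reports emptiness instead of reading bucket 0 unconditionally. Peek sketch, assuming a heap as in Example No. 1:

const void *top = NULL;
if (CFBinaryHeapGetMinimumIfPresent(heap, &top)) {
    CFShow(top); // the heap still owns this value
}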