// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
// Otherwise we need to copy it and update the stack forwarding pointer
// dest: address of the byref slot inside the new (heap) Block being fixed up.
// arg:  the source Block_byref (may live on the stack or already on the heap).
// flags: BLOCK_FIELD_* bits describing the field being copied.
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
        ;   // don't need to do any more work — the collector owns it
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // refcount of zero means src still points to stack; make the heap copy now
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if its weak ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            // Helper/layout info lives in trailer structs immediately after the header.
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;
            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                // extended layout string follows the copy/dispose trailer
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1);
                copy3->layout = src3->layout;
            }
            (*src2->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong
            // This copy includes Block_byref_3, if any.
            _Block_memmove(copy+1, src+1, src->size - sizeof(struct Block_byref));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);  // bump the shared heap copy's refcount
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) { struct Block_byref **destp = (struct Block_byref **)dest; struct Block_byref *src = (struct Block_byref *)arg; //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags); //printf("src dump: %s\n", _Block_byref_dump(src)); if (src->forwarding->flags & BLOCK_IS_GC) { ; // don't need to do any more work } else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) { //printf("making copy\n"); // src points to stack bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)); // if its weak ask for an object (only matters under GC) struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak); copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier) src->forwarding = copy; // patch stack to point to heap copy copy->size = src->size; if (isWeak) { copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning } if (src->flags & BLOCK_HAS_COPY_DISPOSE) { // Trust copy helper to copy everything of interest // If more than one field shows up in a byref block this is wrong XXX copy->byref_keep = src->byref_keep; copy->byref_destroy = src->byref_destroy; (*src->byref_keep)(copy, src); } else { // just bits. Blast 'em using _Block_memmove in case they're __strong _Block_memmove( (void *)©->byref_keep, (void *)&src->byref_keep, src->size - sizeof(struct Block_byref_header)); } } // already copied to heap else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) { latching_incr_int(&src->forwarding->flags); } // assign byref data block pointer into new Block _Block_assign(src->forwarding, (void **)destp); }
/* Copy, or bump refcount, of a block.  If really copying, call the copy helper if present. */
// arg:   the Block to copy (may be NULL, global, GC, heap, or stack-resident).
// flags: WANTS_ONE requests a refcounted (logical retain-count 1) result under GC.
// Returns the (possibly newly allocated) Block, or NULL on allocation failure / NULL arg.
static void *_Block_copy_internal(const void *arg, const int flags) {
    struct Block_layout *aBlock;
    const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;

    //printf("_Block_copy_internal(%p, %x)\n", arg, flags);
    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high — already a heap block, just bump the refcount
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
            // Tell collector to hang on this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;  // global blocks are immortal; nothing to do
    }

    // Its a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 1;      // heap block with refcount 1
        result->isa = _NSConcreteMallocBlock;
        if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        unsigned long int flags = aBlock->flags;    // NOTE: shadows the parameter from here on
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 1;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        if (flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper...\n");
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;   // has C++ ctor/dtor — needs finalization
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
// Copy, or bump refcount, of a block.  If really copying, call the copy helper if present.
// Newer-ABI variant: refcounts are kept in units of 2 so the low bit can hold
// BLOCK_DEALLOCATING (see the "| 2 // logical refcount 1" below).
// arg:      the Block to copy (may be NULL, global, GC, heap, or stack-resident).
// wantsOne: request a refcounted (logical retain-count 1) result under GC.
// Returns the (possibly newly allocated) Block, or NULL on allocation failure / NULL arg.
static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high — already a heap block, just bump the refcount
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
            // Tell collector to hang on this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;  // global blocks are immortal; nothing to do
    }

    // Its a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2;  // logical refcount 1
        result->isa = _NSConcreteMallocBlock;
        _Block_call_copy_helper(result, aBlock);
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        int32_t flags = aBlock->flags;  // NOTE: local shadows nothing here; working copy of flags
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 2;   // logical refcount 1
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        _Block_call_copy_helper(result, aBlock);
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;   // has C++ ctor/dtor — needs finalization
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}