void
_dispatch_source_latch_and_call(dispatch_source_t ds)
{
	uintptr_t prev;

	if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) {
		return; // canceled, or no outstanding external references
	}
	// Atomically take ownership of the accumulated event data, clearing the
	// pending word so new events can latch while the handler runs.
	prev = dispatch_atomic_xchg((intptr_t *)&ds->ds_pending_data, 0);
	if (ds->ds_is_level) {
		// Level-triggered sources store the complement of the data.
		ds->ds_data = ~prev;
	} else {
		ds->ds_data = prev;
	}
	dispatch_assume(prev); // a wakeup with nothing pending is unexpected
	if (prev) {
		if (ds->ds_handler_func) {
#ifndef DISPATCH_NO_LEGACY
			((dispatch_source_handler_function_t)ds->ds_handler_func)(
					ds->ds_handler_ctxt, ds);
#else
			ds->ds_handler_func(ds->ds_handler_ctxt);
#endif
		}
	}
}
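The latch is the interesting part: pending event data accumulates in ds_pending_data while the source is busy, and the handler consumes it in a single atomic swap, so deliveries coalesce rather than queue. Below is a minimal, self-contained sketch of that pattern using C11 atomics in place of dispatch_atomic_xchg(); all sketch_* names are hypothetical and not part of libdispatch.

/* sketch_latch.c - illustrative only, not libdispatch code */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	atomic_uintptr_t pending_data; /* accumulated, unconsumed event data */
	uintptr_t data;                /* value exposed to the handler */
	bool is_level;                 /* level-triggered: store complement */
} sketch_source_t;

/* Producer side: merge new event data. An edge-triggered source adds;
 * a level-triggered source would store the complement of the level. */
static void
sketch_merge_data(sketch_source_t *s, uintptr_t val)
{
	if (s->is_level) {
		atomic_store(&s->pending_data, ~val);
	} else {
		atomic_fetch_add(&s->pending_data, val);
	}
}

/* Consumer side: atomically latch (take and clear) the pending data,
 * then invoke the handler only if something was actually pending. */
static void
sketch_latch_and_call(sketch_source_t *s, void (*handler)(sketch_source_t *))
{
	uintptr_t prev = atomic_exchange(&s->pending_data, 0);
	s->data = s->is_level ? ~prev : prev;
	if (prev) { /* a spurious wakeup latches 0 and skips the handler */
		handler(s);
	}
}

static void
sketch_handler(sketch_source_t *s)
{
	printf("handler saw data: %lu\n", (unsigned long)s->data);
}

int
main(void)
{
	sketch_source_t s = { .is_level = false };
	sketch_merge_data(&s, 1);
	sketch_merge_data(&s, 1);
	sketch_latch_and_call(&s, sketch_handler); /* prints 2: events coalesced */
	sketch_latch_and_call(&s, sketch_handler); /* no output: already drained */
	return 0;
}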
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;

	// mach_vm_map() honors the alignment mask directly, so ask the kernel
	// for a magazine-aligned region; retry on transient address-space
	// exhaustion, crash on any other failure.
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			DISPATCH_CLIENT_CRASH("Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	// mmap() cannot be asked for alignment, so over-allocate by one
	// magazine; a magazine-aligned span of the right size must then fit.
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;

	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);

	// In debug builds, keep the slop mapped but inaccessible so that any
	// stray access faults immediately.
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole
		// thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
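On the non-Mach path the realignment arithmetic is the subtle part, so here is a standalone sketch of just that math: over-allocating by one magazine guarantees a magazine-aligned span of the full heap size fits somewhere in the region, and the leftovers at each end ("slop") are what gets unmapped. The MAG_* constants and values below are illustrative stand-ins for BYTES_PER_MAGAZINE, MAGAZINE_MASK, and MAGAZINES_PER_HEAP, not libdispatch's real configuration.

/* sketch_align.c - illustrative only, hypothetical constants */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAG_BYTES     ((uintptr_t)1 << 20)  /* example: 1 MiB magazines */
#define MAG_MASK      (~(MAG_BYTES - 1))    /* clears intra-magazine bits */
#define MAGS_PER_HEAP 4                     /* example heap size */

int
main(void)
{
	const uintptr_t region_sz = (1 + MAGS_PER_HEAP) * MAG_BYTES;
	/* Pretend mmap() returned this unaligned address: */
	uintptr_t region = (5 * MAG_BYTES) + 0x3000;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned, aligned_end, bottom_slop, top_slop;

	if ((region & ~MAG_MASK) == 0) {
		/* Already aligned: the entire extra magazine is top slop. */
		bottom_slop = 0;
		aligned = region;
		aligned_end = region_end - MAG_BYTES;
		top_slop = MAG_BYTES;
	} else {
		/* Round up to the next magazine boundary; the extra magazine of
		 * over-allocation guarantees aligned_end still fits in the region. */
		aligned = (region & MAG_MASK) + MAG_BYTES;
		aligned_end = aligned + MAGS_PER_HEAP * MAG_BYTES;
		bottom_slop = aligned - region;
		top_slop = MAG_BYTES - bottom_slop;
	}

	/* The same invariants the DISPATCH_DEBUG block asserts: */
	assert(aligned % MAG_BYTES == 0);
	assert(aligned_end + top_slop == region_end);
	assert(region + bottom_slop == aligned);
	assert(region_sz == bottom_slop + top_slop + MAGS_PER_HEAP * MAG_BYTES);

	printf("bottom slop %#lx, top slop %#lx\n",
			(unsigned long)bottom_slop, (unsigned long)top_slop);
	return 0;
}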