Example #1
void
dispatch_resume(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
	// Global objects cannot be suspended or resumed. This also has the
	// side effect of saturating the suspend count of an object and
	// guarding against resuming due to overflow.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// Check the previous value of the suspend count. If the previous
	// value was a single suspend interval, the object should be resumed.
	// If the previous value was less than the suspend interval, the object
	// has been over-resumed.
	unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
			do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Balancing the retain() done in suspend() for rdar://8181908
		return _dispatch_release(dou._do);
	}
	if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		return _dispatch_resume_slow(dou);
	}
	DISPATCH_CLIENT_CRASH("Over-resume of an object");
}
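For context, a minimal usage sketch of the public suspend/resume API that this function backs, assuming only <dispatch/dispatch.h>; the queue label is illustrative:

#include <dispatch/dispatch.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.queue", NULL);
	dispatch_suspend(q); // adds one suspend interval to do_suspend_cnt
	dispatch_async(q, ^{ /* held back until the queue is resumed */ });
	dispatch_resume(q);  // balances the suspend; the block may now run
	// A second dispatch_resume(q) here would over-resume the queue and
	// hit DISPATCH_CLIENT_CRASH("Over-resume of an object").
	dispatch_release(q);
	return 0;
}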
Example #2
void
_dispatch_semaphore_dispose(dispatch_object_t dou)
{
	dispatch_semaphore_t dsema = dou._dsema;

	if (dsema->dsema_value < dsema->dsema_orig) {
		DISPATCH_CLIENT_CRASH(
				"Semaphore/group object deallocated while in use");
	}

#if USE_MACH_SEM
	kern_return_t kr;
	if (dsema->dsema_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
	if (dsema->dsema_waiter_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
#elif USE_POSIX_SEM
	int ret = sem_destroy(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
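A hedged illustration of the client bug this guard catches: if a waiter is still blocked on the semaphore when the last reference goes away, dsema_value sits below dsema_orig at dispose time. The queue choice is illustrative:

dispatch_semaphore_t sema = dispatch_semaphore_create(0);
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); // decrements dsema_value
});
// Releasing while the worker may still be blocked in the wait can dispose
// the semaphore with dsema_value < dsema_orig and trip
// "Semaphore/group object deallocated while in use".
dispatch_release(sema);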
Example #3
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
	}
	if (slowpath(value == dsema->dsema_orig)) {
		(void)_dispatch_group_wake(dsema);
	}
}
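Since groups are built on the same counter, a balanced enter/leave pair looks like this (a minimal sketch using the public API; the work block is illustrative):

dispatch_group_t group = dispatch_group_create();
dispatch_group_enter(group); // decrements the shared counter
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
	/* ... work ... */
	dispatch_group_leave(group); // increments; the last leave wakes waiters
});
dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
dispatch_release(group);
// A dispatch_group_leave(group) without a matching enter would eventually
// overflow the counter and crash as above.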
Example #4
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}
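The matching wait side, as a minimal sketch of the public API (block body illustrative):

dispatch_semaphore_t sema = dispatch_semaphore_create(0);
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
	/* ... produce a result ... */
	dispatch_semaphore_signal(sema); // takes the slow path only if someone waits
});
dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
dispatch_release(sema);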
Example #5
static void
_voucher_insert(voucher_t v)
{
	mach_voucher_t kv = v->v_ipc_kvoucher;
	if (!kv) return;
	_vouchers_lock_lock();
	if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) {
		_dispatch_voucher_debug("corruption", v);
		DISPATCH_CLIENT_CRASH(v->v_list.tqe_prev, "Voucher corruption");
	}
	TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list);
	_vouchers_lock_unlock();
}
Example #6
void
dispatch_main(void)
{
#if HAVE_PTHREAD_MAIN_NP
	if (pthread_main_np()) {
#endif
		_dispatch_program_is_probably_callback_driven = true;
		pthread_exit(NULL);
		DISPATCH_CRASH("pthread_exit() returned");
#if HAVE_PTHREAD_MAIN_NP
	}
	DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread");
#endif
}
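Typical use, sketched with the public API: park the main thread and let the main queue drive the program.

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
	dispatch_async(dispatch_get_main_queue(), ^{
		printf("running on the main queue\n");
	});
	dispatch_main(); // never returns; see pthread_exit() above
}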
Example #7
void
_dispatch_xref_dispose(dispatch_object_t dou)
{
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
#if !USE_OBJC
	if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) {
		_dispatch_source_xref_dispose(dou._ds);
	} else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) {
		_dispatch_runloop_queue_xref_dispose(dou._dq);
	}
	return _dispatch_release(dou._os_obj);
#endif
}
Example #8
static void
_voucher_remove(voucher_t v)
{
	mach_voucher_t kv = v->v_ipc_kvoucher;
	if (!_TAILQ_IS_ENQUEUED(v, v_list)) return;
	_vouchers_lock_lock();
	if (slowpath(!kv)) {
		_dispatch_voucher_debug("corruption", v);
		DISPATCH_CLIENT_CRASH(0, "Voucher corruption");
	}
	// check for resurrection race with _voucher_find_and_retain
	if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0 &&
			_TAILQ_IS_ENQUEUED(v, v_list)) {
		TAILQ_REMOVE(_vouchers_head(kv), v, v_list);
		_TAILQ_MARK_NOT_ENQUEUED(v, v_list);
		v->v_list.tqe_next = (void*)~0ull;
	}
	_vouchers_lock_unlock();
}
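The enqueued-marker idiom used here (tqe_prev == NULL means "not on any list") can be sketched with plain <sys/queue.h>; the node type and helpers below are hypothetical, not libdispatch API:

#include <sys/queue.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	TAILQ_ENTRY(node) link; // must start zero-initialized
};
TAILQ_HEAD(node_list, node);

static bool
node_is_enqueued(struct node *n)
{
	return n->link.tqe_prev != NULL;
}

static void
node_remove(struct node_list *head, struct node *n)
{
	if (!node_is_enqueued(n)) return;
	TAILQ_REMOVE(head, n, link);
	n->link.tqe_prev = NULL;          // mark not enqueued
	n->link.tqe_next = (void *)~0ull; // poison, as _voucher_remove does
}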
Example #9
DISPATCH_NOINLINE
void
_dispatch_source_xref_release(dispatch_source_t ds)
{
#ifndef DISPATCH_NO_LEGACY
	if (ds->ds_is_legacy) {
		// Cancel unless this is a one-shot timer
		if ((ds->ds_timer.flags & DISPATCH_TIMER_TYPE_MASK) != DISPATCH_TIMER_ONESHOT) {
			dispatch_source_cancel(ds);
		}
		// Clients often leave sources suspended at the last release
		dispatch_atomic_and(&ds->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK);
	} else
#endif
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
	_dispatch_wakeup(as_do(ds));
	_dispatch_release(as_do(ds));
}
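The legacy branch above exists because releasing a still-suspended source is a common client bug. The correct teardown with the public API looks like this (timer parameters elided for brevity):

dispatch_source_t timer = dispatch_source_create(
		DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue());
dispatch_source_set_event_handler(timer, ^{ /* tick */ });
dispatch_resume(timer); // sources are created suspended
/* ... */
dispatch_source_cancel(timer);
dispatch_release(timer); // releasing while still suspended would crash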
Example #10
// Return true if this bit was the last in the bitmap, and it is now all zeroes
DISPATCH_ALWAYS_INLINE_NDEBUG
static bool
bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index,
		bool exclusively)
{
#if DISPATCH_DEBUG
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << index;
	bitmap_t b;

	if (exclusively == CLEAR_EXCLUSIVELY) {
		if (slowpath((*bitmap & mask) == 0)) {
			DISPATCH_CLIENT_CRASH(*bitmap,
					"Corruption: failed to clear bit exclusively");
		}
	}

	// and-and-fetch
	b = os_atomic_and(bitmap, ~mask, release);
	return b == 0;
}
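The same clear-and-test-empty pattern in portable C11, for readers without the libdispatch atomics shims; note that os_atomic_and returns the new value, while C11 atomic_fetch_and returns the old one, hence the extra mask below. This helper is a hypothetical sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t bitmap_t;

static bool
clear_bit_and_test_empty(_Atomic bitmap_t *bitmap, unsigned int index)
{
	const bitmap_t mask = (bitmap_t)1 << index;
	// atomic_fetch_and returns the previous value; apply the mask again
	// to recover the and-and-fetch result that bitmap_clear_bit uses.
	bitmap_t b = atomic_fetch_and_explicit(bitmap, ~mask,
			memory_order_release) & ~mask;
	return b == 0;
}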
Example #11
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			DISPATCH_CLIENT_CRASH("Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
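The realignment dance in the non-Mach branch generalizes to any POSIX system: over-allocate by one alignment unit, then unmap the slop at both ends. A hypothetical standalone sketch (map_aligned is not libdispatch API; align must be a page-multiple power of two):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
map_aligned(size_t size, size_t align)
{
	size_t region_sz = size + align;
	char *region = mmap(NULL, region_sz, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE, -1, 0);
	if (region == MAP_FAILED) return NULL;
	uintptr_t addr = (uintptr_t)region;
	uintptr_t aligned = (addr + align - 1) & ~(uintptr_t)(align - 1);
	size_t bottom_slop = aligned - addr;   // slop below the aligned start
	size_t top_slop = align - bottom_slop; // remainder above the aligned end
	if (bottom_slop) (void)munmap(region, bottom_slop);
	if (top_slop) (void)munmap((char *)aligned + size, top_slop);
	return (void *)aligned;
}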