Example #1
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_apply_invoke(void *ctxt)
{
	dispatch_apply_t da = (dispatch_apply_t)ctxt;
	size_t const iter = da->da_iterations;
	typeof(da->da_func) const func = da->da_func;
	void *const da_ctxt = da->da_ctxt;
	size_t idx, done = 0;

	_dispatch_workitem_dec(); // this unit executes many items

	// Make nested dispatch_apply fall into serial case rdar://problem/9294578
	_dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul);
	// Striding is the responsibility of the caller.
	while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) {
		_dispatch_client_callout2(da_ctxt, idx, func);
		_dispatch_workitem_inc();
		done++;
	}
	_dispatch_thread_setspecific(dispatch_apply_key, NULL);

	dispatch_atomic_release_barrier();

	// The thread that finished the last workitem wakes up the (possibly waiting)
	// thread that called dispatch_apply. They could be one and the same.
	if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) {
		_dispatch_thread_semaphore_signal(da->da_sema);
	}

	if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) {
		_dispatch_continuation_free((dispatch_continuation_t)da);
	}
}
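
This is the per-thread worker body behind dispatch_apply: each participating thread claims iteration indices via the atomic increment on da_index, runs the user callback once per claimed index, the thread that completes the last work item signals the semaphore the caller may be waiting on, and the last thread to exit frees the descriptor. A minimal caller sketch follows, using the public dispatch_apply_f API, whose function-pointer/context pair matches da_func/da_ctxt above; the squaring workload and the iteration count of 8 are illustrative assumptions, not from the source.

#include <dispatch/dispatch.h>
#include <stdio.h>

static void square(void *ctxt, size_t idx)
{
	int *out = (int *)ctxt;
	out[idx] = (int)(idx * idx);
}

int main(void)
{
	int results[8] = {0};
	dispatch_queue_t q =
	    dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	// Blocks until all 8 iterations have run; worker threads claim
	// indices with the atomic-increment loop shown above.
	dispatch_apply_f(8, q, results, square);

	for (size_t i = 0; i < 8; i++)
		printf("results[%zu] = %d\n", i, results[i]);
	return 0;
}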
Example #2
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
	}
	if (slowpath(value == dsema->dsema_orig)) {
		(void)_dispatch_group_wake(dsema);
	}
}
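
A dispatch group is backed by a semaphore: dispatch_group_enter decrements dsema_value, and dispatch_group_leave increments it, so when the value climbs back to dsema_orig every enter has been balanced and _dispatch_group_wake releases any waiters (an extra leave wraps the counter to LONG_MIN and crashes deliberately). A minimal usage sketch of the standard enter/leave pairing follows; the task struct, the workload, and the count of 3 are illustrative assumptions.

#include <dispatch/dispatch.h>
#include <stdio.h>

struct task { dispatch_group_t group; long id; };

static void work(void *ctxt)
{
	struct task *t = (struct task *)ctxt;
	printf("task %ld done\n", t->id);
	// Balances the enter below; the leave that restores the counter to
	// its original value triggers _dispatch_group_wake.
	dispatch_group_leave(t->group);
}

int main(void)
{
	dispatch_group_t group = dispatch_group_create();
	dispatch_queue_t q =
	    dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	struct task tasks[3];

	for (long i = 0; i < 3; i++) {
		tasks[i] = (struct task){ group, i };
		dispatch_group_enter(group); // one more outstanding task
		dispatch_async_f(q, &tasks[i], work);
	}

	// Returns once every enter has been matched by a leave.
	dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
	dispatch_release(group);
	return 0;
}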
Example #3
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}
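
Note the fast path: if the incremented value is positive, no thread is parked in dispatch_semaphore_wait and the signal returns immediately; only a non-positive value forces the _dispatch_semaphore_signal_slow call that actually wakes a waiter. A minimal producer/consumer sketch follows, assuming a semaphore created with value 0; the producer function and the printed strings are illustrative assumptions.

#include <dispatch/dispatch.h>
#include <stdio.h>

static void producer(void *ctxt)
{
	dispatch_semaphore_t sema = (dispatch_semaphore_t)ctxt;
	puts("producer: resource ready");
	// If the consumer is already parked (value -1), this increment lands
	// at 0, the `value > 0` fast path fails, and the slow path wakes it;
	// otherwise the value simply becomes 1 and the later wait won't block.
	dispatch_semaphore_signal(sema);
}

int main(void)
{
	// Created with value 0, so a wait parks until a signal arrives.
	dispatch_semaphore_t sema = dispatch_semaphore_create(0);
	dispatch_queue_t q =
	    dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_async_f(q, sema, producer);

	// Decrements the value and blocks while it is negative.
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
	puts("consumer: resumed after signal");
	dispatch_release(sema);
	return 0;
}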