Example #1
DISPATCH_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap_slow(void)
{
	dispatch_heap_t *heap = &_dispatch_main_heap;
	dispatch_continuation_t cont;

	for (;;) {
		if (!fastpath(*heap)) {
			_dispatch_alloc_try_create_heap(heap);
		}
		cont = _dispatch_alloc_continuation_from_heap(*heap);
		if (fastpath(cont)) {
			return cont;
		}
		// If we have tuned our parameters right, 99.999% of apps should
		// never reach this point! The ones that do have gone off the rails...
		//
		// Magazine is full? Onto the next heap!
		// We tried 'stealing' from other CPUs' magazines. The net effect
		// was worse performance from more wasted search time and more
		// cache contention.

		// rdar://11378331
		// Future optimization: start at the page we last used, start
		// in the *zone* we last used. But this would only improve deeply
		// pathological cases like dispatch_starfish
		heap = &(*heap)->header.dh_next;
	}
}
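
The fastpath() and slowpath() annotations that appear throughout these examples are thin wrappers around the compiler's __builtin_expect() hint; libdispatch defines them roughly as follows (a sketch of the idiom, exact casts vary by version):

// Branch-prediction hints: tell the compiler which outcome is expected so
// that the likely path is laid out as straight-line, fall-through code.
#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l))
#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l))

Both macros return the value of x unchanged; they only influence code layout, which is why they can wrap whole expressions such as the heap test above.
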
Example #2
void
dispatch_resume(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
	// Global objects cannot be suspended or resumed. This also has the
	// side effect of saturating the suspend count of an object and
	// guarding against resuming due to overflow.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// Check the previous value of the suspend count. If the previous
	// value was a single suspend interval, the object should be resumed.
	// If the previous value was less than the suspend interval, the object
	// has been over-resumed.
	unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
			 do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Balancing the retain() done in suspend() for rdar://8181908
		return _dispatch_release(dou._do);
	}
	if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		return _dispatch_resume_slow(dou);
	}
	DISPATCH_CLIENT_CRASH("Over-resume of an object");
}
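
dispatch_resume() works off the value the suspend count had before the subtraction: more than one suspend interval means the object stays suspended, exactly one means this call made it runnable, and less than one means an over-resume. A minimal stand-alone model of that protocol using C11 atomics (the names and the interval value are hypothetical):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define SUSPEND_INTERVAL 4u // the low bits are reserved, as in libdispatch

static atomic_uint suspend_cnt;

void toy_suspend(void) {
	atomic_fetch_add(&suspend_cnt, SUSPEND_INTERVAL);
}

void toy_resume(void) {
	// fetch_sub returns the previous value, like dispatch_atomic_sub_orig2o
	unsigned int prev = atomic_fetch_sub(&suspend_cnt, SUSPEND_INTERVAL);
	if (prev > SUSPEND_INTERVAL) return;    // still suspended
	if (prev == SUSPEND_INTERVAL) {         // dropped to zero: wake it
		puts("object is runnable again");
		return;
	}
	abort();                                // over-resume, as in the crash above
}
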
Example #3
DISPATCH_ALLOC_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_alloc(void)
{
	dispatch_continuation_t cont;

	if (fastpath(_dispatch_main_heap)) {
		// Start looking in the same page where we found a continuation
		// last time.
		bitmap_t *last = last_found_page();
		if (fastpath(last)) {
			unsigned int i;
			for (i = 0; i < BITMAPS_PER_PAGE; i++) {
				bitmap_t *cur = last + i;
				unsigned int index = bitmap_set_first_unset_bit(cur);
				if (fastpath(index != NO_BITS_WERE_UNSET)) {
					bitmap_t *supermap;
					unsigned int bindex;
					get_cont_and_indices_for_bitmap_and_index(cur,
							index, &cont, &supermap, &bindex);
					mark_bitmap_as_full_if_still_full(supermap, bindex,
							cur);
					return cont;
				}
			}
		}

		cont = _dispatch_alloc_continuation_from_heap(_dispatch_main_heap);
		if (fastpath(cont)) {
			return cont;
		}
	}
	return _dispatch_alloc_continuation_from_heap_slow();
}
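
bitmap_set_first_unset_bit() is not shown in these snippets; a plausible implementation claims the lowest clear bit with a compare-and-swap loop and returns a sentinel when the bitmap is full. A self-contained sketch under those assumptions (not libdispatch's actual code):

#include <stdatomic.h>
#include <limits.h>

typedef unsigned long bitmap_t;
#define NO_BITS_WERE_UNSET UINT_MAX // sentinel, as used in the examples

static unsigned int
set_first_unset_bit(_Atomic bitmap_t *map)
{
	bitmap_t old = atomic_load_explicit(map, memory_order_relaxed);
	for (;;) {
		if (old == ~0ul) return NO_BITS_WERE_UNSET; // bitmap is full
		unsigned int index = (unsigned int)__builtin_ctzl(~old); // lowest 0 bit
		if (atomic_compare_exchange_weak_explicit(map, &old,
				old | (1ul << index),
				memory_order_acquire, memory_order_relaxed)) {
			return index; // we own this slot now
		}
		// CAS failed: another thread changed the bitmap; old was refreshed
	}
}
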
Example #4
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_maps_and_indices_for_continuation(dispatch_continuation_t c,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out,
		bitmap_t **bitmap_out, unsigned int *index_out)
{
	unsigned int cindex, sindex, index, mindex;
	padded_continuation *p = (padded_continuation *)c;
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	if (fastpath(continuation_is_in_first_page(c))) {
		cindex = (unsigned int)(p - m->fp_conts);
		index = cindex % CONTINUATIONS_PER_BITMAP;
		mindex = cindex / CONTINUATIONS_PER_BITMAP;
		if (fastpath(supermap_out)) *supermap_out = NULL;
		if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
		if (fastpath(bitmap_out)) *bitmap_out = &m->fp_maps[mindex];
		if (fastpath(index_out)) *index_out = index;
		return;
	}
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	cindex = (unsigned int)(p - (padded_continuation *)m->conts);
	sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP);
	mindex = (cindex / CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP;
	index = cindex % CONTINUATIONS_PER_BITMAP;
	if (fastpath(supermap_out)) *supermap_out = &m->supermaps[sindex];
	if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
	if (fastpath(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex];
	if (fastpath(index_out)) *index_out = index;
}
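
The supermap/bitmap/bit coordinates are plain div/mod decompositions of the flat continuation index. A worked example with hypothetical sizes (the real constants derive from the page and allocation sizes):

#include <assert.h>

#define CONTINUATIONS_PER_BITMAP 32u // hypothetical, for illustration
#define BITMAPS_PER_SUPERMAP     32u // hypothetical, for illustration

int main(void)
{
	unsigned int cindex = 2113; // flat index: (2 * 32 + 2) * 32 + 1
	unsigned int sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP);
	unsigned int mindex = (cindex / CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP;
	unsigned int index  = cindex % CONTINUATIONS_PER_BITMAP;
	assert(sindex == 2 && mindex == 2 && index == 1);
	return 0;
}
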
Example #5
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_apply_invoke(void *ctxt)
{
	dispatch_apply_t da = (dispatch_apply_t)ctxt;
	size_t const iter = da->da_iterations;
	typeof(da->da_func) const func = da->da_func;
	void *const da_ctxt = da->da_ctxt;
	size_t idx, done = 0;

	_dispatch_workitem_dec(); // this unit executes many items

	// Make nested dispatch_apply fall into serial case rdar://problem/9294578
	_dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul);
	// Striding is the responsibility of the caller.
	while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) {
		_dispatch_client_callout2(da_ctxt, idx, func);
		_dispatch_workitem_inc();
		done++;
	}
	_dispatch_thread_setspecific(dispatch_apply_key, NULL);

	dispatch_atomic_release_barrier();

	// The thread that finished the last workitem wakes up the (possibly waiting)
	// thread that called dispatch_apply. They could be one and the same.
	if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) {
		_dispatch_thread_semaphore_signal(da->da_sema);
	}

	if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) {
		_dispatch_continuation_free((dispatch_continuation_t)da);
	}
}
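
The while loop above is the classic shared-counter work-claiming pattern: each worker atomically increments da_index and owns the iteration index it drew, so the iterations partition themselves across however many threads show up. The pattern in isolation (hypothetical names, C11 atomics in place of the dispatch_atomic_* macros):

#include <stdatomic.h>
#include <stddef.h>

struct apply_state {
	atomic_size_t index;          // next iteration to hand out
	size_t iterations;            // total iteration count
	void (*func)(void *, size_t); // client callback
	void *ctxt;                   // client context
};

// Each participating thread runs this; no up-front partitioning needed.
static void
apply_worker(struct apply_state *s)
{
	size_t idx;
	// fetch_add returns the pre-increment value: that iteration is ours
	while ((idx = atomic_fetch_add(&s->index, 1)) < s->iterations) {
		s->func(s->ctxt, idx);
	}
}
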
Example #6
void kThread::postSwitchFunc(uThread* nextuThread, void* args = nullptr) {

    kThread* ck = kThread::currentKT;
    //mainUT does not need to be managed here
    if (fastpath(ck->currentUT != kThread::currentKT->mainUT)) {
        switch (ck->currentUT->state) {
        case uThread::State::TERMINATED:
            ck->currentUT->destory(false);
            break;
        case uThread::State::YIELD:
            ck->currentUT->resume();
            break;
        case uThread::State::MIGRATE:
            ck->currentUT->resume();
            break;
        case uThread::State::WAITING: {
            //the post-suspend function and its argument must already be set
            assert(postSuspendFunc != nullptr);
            postSuspendFunc((void*)ck->currentUT, args);
            break;
        }
        default:
            break;
        }
    }
    //Change the current thread to the next
    ck->currentUT = nextuThread;
    nextuThread->state = uThread::State::RUNNING;
}
Example #7
ssize_t IOPoller::_Poll(int timeout){
    assert(epoll_fd);

    int32_t n, mode;
    struct epoll_event *ev;

    //TODO: dedicated thread always blocks but others should not
    n = epoll_wait(epoll_fd, events, MAXEVENTS, timeout);

    if (n == -1) {
        if (errno != EINTR) {
            return (-1);
        }
        return (0);
    }

    //timeout
    if (n == 0) return 0;

    for(int i = 0; i < n; i++) {
        ev = &events[i];
        if(slowpath(ev->events == 0))
            continue;
        mode = 0;
        if(ev->events & (EPOLLIN|EPOLLRDHUP|EPOLLHUP|EPOLLERR))
            mode |= IOHandler::UT_IOREAD;
        if(ev->events & (EPOLLOUT|EPOLLHUP|EPOLLERR))
            mode |= IOHandler::UT_IOWRITE;
        if(fastpath(mode))
            ioh.PollReady( *(PollData*) ev->data.ptr , mode);
    }
    return n;
}
Example #8
DISPATCH_NOINLINE
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *))
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	struct dispatch_sema_notify_s *dsn, *prev;

	// FIXME -- this should be updated to use the continuation cache
	while (!(dsn = calloc(1, sizeof(*dsn)))) {
		sleep(1);
	}

	dsn->dsn_queue = dq;
	dsn->dsn_ctxt = ctxt;
	dsn->dsn_func = func;
	_dispatch_retain(dq);
	dispatch_atomic_store_barrier();
	prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn);
	if (fastpath(prev)) {
		prev->dsn_next = dsn;
	} else {
		_dispatch_retain(dg);
		(void)dispatch_atomic_xchg2o(dsema, dsema_notify_head, dsn);
		if (dsema->dsema_value == dsema->dsema_orig) {
			_dispatch_group_wake(dsema);
		}
	}
}
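
The notify list is built with a wait-free multi-producer append: atomically exchange the new node into the tail pointer, then link the previous tail to it. Between those two steps a consumer can observe a node whose next pointer is still NULL even though it is not the tail; Example #9 shows the matching spin. The producer side in isolation (hypothetical names):

#include <stdatomic.h>

struct node {
	struct node *_Atomic next;
	void (*func)(void *);
	void *ctxt;
};

static struct node *_Atomic tail;

// Safe to call from any number of threads concurrently.
static void
notify_append(struct node *n)
{
	// Become the new tail; the exchange hands back the previous tail.
	struct node *prev = atomic_exchange(&tail, n);
	if (prev) {
		// Publish the link; a consumer may be spinning on it right now.
		atomic_store(&prev->next, n);
	} else {
		// List was empty: this producer also publishes the head (in
		// libdispatch, via the dsema_notify_head exchange above).
	}
}
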
Example #9
DISPATCH_NOINLINE
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		_dispatch_release(dsema);
	}
	return 0;
}
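
The inner while loop covers the publication window left by the tail-exchange append in Example #8: if a node's dsn_next is still NULL but the node is not the snapshotted tail, another node must exist and the consumer pauses until its link becomes visible. The drain side, continuing the hypothetical node type sketched after Example #8:

#include <stdatomic.h>
#include <stdlib.h>

// Single consumer: walk a detached [head, tail] snapshot of the list.
static void
notify_drain(struct node *head, struct node *tail_snapshot)
{
	while (head) {
		head->func(head->ctxt);
		struct node *next = atomic_load(&head->next);
		if (!next && head != tail_snapshot) {
			// A producer swapped the tail but has not stored the link yet
			do {
				/* equivalent of _dispatch_hardware_pause() */
			} while (!(next = atomic_load(&head->next)));
		}
		free(head);
		head = next;
	}
}
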
Example #10
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long value = dispatch_atomic_dec2o(dsema, dsema_value);
	dispatch_atomic_acquire_barrier();
	if (fastpath(value >= 0)) {
		return 0;
	}
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}
Example #11
static dispatch_continuation_t
_dispatch_malloc_continuation_alloc(void)
{
	dispatch_continuation_t dc;
	while (!(dc = fastpath(calloc(1,
			ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) {
		_dispatch_temporary_resource_shortage();
	}
	return dc;
}
Example #12
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap,
		unsigned int index, dispatch_continuation_t *continuation_out,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out)
{
	// m_for_c wants a continuation not a bitmap, but it works because it
	// just masks off the bottom bits of the address.
	struct dispatch_magazine_s *m = magazine_for_continuation((void *)bitmap);
	unsigned int mindex = (unsigned int)(bitmap - m->maps[0]);
	unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP;
	unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP;
	dispatch_assert(&m->maps[sindex][bindex] == bitmap);
	if (fastpath(continuation_out)) {
		*continuation_out = continuation_address(m, sindex, bindex, index);
	}
	if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex);
	if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex;
}
Example #13
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}
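
Examples #10 and #13 are the two halves of one counter protocol: dsema_value at or above zero means tokens are available, below zero means its magnitude counts threads that are (or soon will be) blocked, so both calls complete with a single atomic op unless the counter crosses zero. A toy model of the protocol with a POSIX semaphore standing in for the kernel slow path (a sketch, hypothetical names):

#include <semaphore.h>
#include <stdatomic.h>

struct toy_sema {
	atomic_long value; // >= 0: free tokens; < 0: magnitude counts waiters
	sem_t kernel_sema; // touched only on the slow path
};

void
toy_wait(struct toy_sema *s)
{
	if (atomic_fetch_sub(&s->value, 1) - 1 >= 0) return; // fast path
	sem_wait(&s->kernel_sema); // block until a signal arrives
}

void
toy_signal(struct toy_sema *s)
{
	if (atomic_fetch_add(&s->value, 1) + 1 > 0) return; // nobody waiting
	sem_post(&s->kernel_sema); // wake exactly one waiter
}
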
Example #14
DISPATCH_NOINLINE
void
_os_object_release(_os_object_t obj)
{
	int xref_cnt = _os_object_xrefcnt_dec(obj);
	if (fastpath(xref_cnt >= 0)) {
		return;
	}
	if (slowpath(xref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return _os_object_xref_dispose(obj);
}
Example #15
DISPATCH_ALWAYS_INLINE_NDEBUG
static dispatch_continuation_t
alloc_continuation_from_first_page(struct dispatch_magazine_s *magazine)
{
	unsigned int i, index, continuation_index;

	// TODO: unroll if this is hot?
	for (i = 0; i < FULL_BITMAPS_IN_FIRST_PAGE; i++) {
		index = bitmap_set_first_unset_bit(&magazine->fp_maps[i]);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	if (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE) {
		index = bitmap_set_first_unset_bit_upto_index(&magazine->fp_maps[i],
				REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE - 1);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	return NULL;

found:
	continuation_index = (i * CONTINUATIONS_PER_BITMAP) + index;
	return (dispatch_continuation_t)&magazine->fp_conts[continuation_index];
}
Example #16
DISPATCH_NOINLINE
void
_os_object_release(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed);
	if (fastpath(xref_cnt >= 0)) {
		return;
	}
	if (slowpath(xref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return _os_object_xref_dispose(obj);
}
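
In both versions of _os_object_release the external reference count is biased by one: the stored value is the number of references minus one. After the decrement, a result of zero or more means references remain, exactly -1 means this call dropped the last one, and anything lower is an over-release. The convention in isolation (hypothetical names):

#include <stdatomic.h>
#include <stdlib.h>

struct toy_obj {
	atomic_int xref_cnt; // biased: stored value == external refs - 1
};

void
toy_release(struct toy_obj *o)
{
	// fetch_sub returns the old value; subtract 1 to get the new one
	int cnt = atomic_fetch_sub(&o->xref_cnt, 1) - 1;
	if (cnt >= 0) return;  // other references remain
	if (cnt < -1) abort(); // over-release: crashes loudly in the original
	/* cnt == -1: final release; dispose of the object here */
}
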
Example #17
// Base address of page, or NULL if this page shouldn't be madvise()d
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static void *
madvisable_page_base_for_continuation(dispatch_continuation_t c)
{
	if (fastpath(continuation_is_in_first_page(c))) {
		return NULL;
	}
	void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)PAGE_MASK);
#if DISPATCH_DEBUG
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
	if (slowpath(page_base < (void *)&m->conts)) {
		DISPATCH_CRASH("madvisable continuation too low");
	}
	if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1]
			[BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) {
		DISPATCH_CRASH("madvisable continuation too high");
	}
#endif
	return page_base;
}
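
The page-aligned return value exists so the allocator can hand fully empty pages back to the kernel without unmapping them. A sketch of the intended caller (libdispatch uses madvise(); MADV_DONTNEED is shown here as the portable advice value):

#include <stddef.h>
#include <sys/mman.h>

// Hypothetical caller: every continuation on the page has been freed,
// so let the kernel reclaim the backing memory; the mapping stays valid.
static void
return_page_to_kernel(void *page_base, size_t page_size)
{
	if (page_base) {
		(void)madvise(page_base, page_size, MADV_DONTNEED);
	}
}
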
Example #18
static void
_dispatch_apply2(void *_ctxt)
{
	struct dispatch_apply_s *da = _ctxt;
	size_t const iter = da->da_iterations;
	dispatch_function_apply_t func = da->da_func;
	void *const ctxt = da->da_ctxt;
	size_t idx;

	_dispatch_workitem_dec(); // this unit executes many items

	// Striding is the responsibility of the caller.
	while (fastpath((idx = dispatch_atomic_inc((intptr_t*)&da->da_index) - 1) < iter)) {
		func(ctxt, idx);
		_dispatch_workitem_inc();
	}

	if (dispatch_atomic_dec((intptr_t*)&da->da_thr_cnt) == 0) {
		dispatch_semaphore_signal(da->da_sema);
	}
}
Example #19
DISPATCH_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap(dispatch_heap_t heap)
{
	dispatch_continuation_t cont;

	unsigned int cpu_number = _dispatch_cpu_number();
#ifdef DISPATCH_DEBUG
	dispatch_assert(cpu_number < NUM_CPU);
#endif

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// First try the continuations in the first page for this CPU
	cont = alloc_continuation_from_first_page(&(heap[cpu_number]));
	if (fastpath(cont)) {
		return cont;
	}
#endif
	// Next, try the rest of the magazine for this CPU
	cont = alloc_continuation_from_magazine(&(heap[cpu_number]));
	return cont;
}
Example #20
DISPATCH_NOINLINE
void
_os_object_release_internal(_os_object_t obj)
{
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
	if (fastpath(ref_cnt >= 0)) {
		return;
	}
	if (slowpath(ref_cnt < -1)) {
		DISPATCH_CRASH("Over-release of an object");
	}
#if DISPATCH_DEBUG
	if (slowpath(obj->os_obj_xref_cnt >= 0)) {
		DISPATCH_CRASH("Release while external references exist");
	}
#endif
	return _os_object_dispose(obj);
}