Example no. 1
0
void
dispatch_resume(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
	// Global objects cannot be suspended or resumed. This also has the
	// side effect of saturating the suspend count of an object and
	// guarding against resuming due to overflow.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// Check the previous value of the suspend count. If the previous
	// value was a single suspend interval, the object should be resumed.
	// If the previous value was less than the suspend interval, the object
	// has been over-resumed.
	unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
			 do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Balancing the retain() done in suspend() for rdar://8181908
		return _dispatch_release(dou._do);
	}
	if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		return _dispatch_resume_slow(dou);
	}
	DISPATCH_CLIENT_CRASH("Over-resume of an object");
}
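All of the examples in this listing lean on the slowpath()/fastpath() branch-hint macros used by libdispatch and uThreads. For reference, here is a minimal sketch of how such macros are commonly built on top of the GCC/Clang __builtin_expect builtin; the exact definitions in the two codebases may differ in detail.

// A sketch, assuming GCC/Clang; not the verbatim libdispatch definitions.
#define fastpath(x) ((__typeof__(x))__builtin_expect((long)(x), ~0l))
#define slowpath(x) ((__typeof__(x))__builtin_expect((long)(x), 0l))

// Usage: mark the branch that is expected to be rare so the compiler
// lays out the common path as the fall-through.
// if (slowpath(ptr == NULL)) { /* rare error handling */ }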
Example no. 2
0
static void
_dispatch_apply_redirect(void *ctxt)
{
	dispatch_apply_t da = (dispatch_apply_t)ctxt;
	uint32_t da_width = 2 * (da->da_thr_cnt - 1);
	dispatch_queue_t dq = da->da_queue, rq = dq, tq;

	do {
		// Reserve da_width execution slots on each queue in the target chain.
		uint32_t running = dispatch_atomic_add2o(rq, dq_running, da_width);
		uint32_t width = rq->dq_width;
		if (slowpath(running > width)) {
			// This queue is over-committed: back out the excess from every
			// queue reserved so far and shrink the apply width accordingly.
			uint32_t excess = width > 1 ? running - width : da_width;
			for (tq = dq; 1; tq = tq->do_targetq) {
				(void)dispatch_atomic_sub2o(tq, dq_running, excess);
				if (tq == rq) {
					break;
				}
			}
			da_width -= excess;
			if (slowpath(!da_width)) {
				return _dispatch_apply_serial(da);
			}
			da->da_thr_cnt -= excess / 2;
		}
		rq = rq->do_targetq;
	} while (slowpath(rq->do_targetq));
	_dispatch_apply_f2(rq, da, _dispatch_apply3);
	do {
		// Release the slots reserved above on each queue in the chain.
		(void)dispatch_atomic_sub2o(dq, dq_running, da_width);
		dq = dq->do_targetq;
	} while (slowpath(dq->do_targetq));
}
Example no. 3
0
void
dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	dou._do->do_finalizer = finalizer;
}
Example no. 4
0
void
dispatch_set_context(dispatch_object_t dou, void *context)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	dou._do->do_ctxt = context;
}
Example no. 5
0
void *
dispatch_get_context(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return NULL;
	}
	return dou._do->do_ctxt;
}
Example no. 6
0
bool
_os_object_allows_weak_reference(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == -1)) {
		return false; // object already fully released; no new weak references
	}
	if (slowpath(xref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return true;
}
Example no. 7
0
DISPATCH_NOINLINE
_os_object_t
_os_object_retain_with_resurrect(_os_object_t obj)
{
	int xref_cnt = _os_object_xrefcnt_inc(obj);
	if (slowpath(xref_cnt < 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an overreleased object");
	}
	if (slowpath(xref_cnt == 0)) {
		_os_object_retain_internal(obj);
	}
	return obj;
}
Example no. 8
0
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
	}
	if (slowpath(value == dsema->dsema_orig)) {
		(void)_dispatch_group_wake(dsema);
	}
}
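The crash above guards against dispatch_group_leave() being called without a matching dispatch_group_enter(). For context, a small illustrative sketch of the public API with every leave matched by a prior enter; the helper names here are invented for this example and are not part of the library.

#include <dispatch/dispatch.h>

// Hypothetical worker used only in this sketch.
static void do_work_then_leave(void *ctx)
{
	dispatch_group_t group = (dispatch_group_t)ctx;
	/* ... do work ... */
	dispatch_group_leave(group); // balances the matching dispatch_group_enter()
}

static void balanced_group_example(void)
{
	dispatch_group_t group = dispatch_group_create();
	dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_group_enter(group); // every leave must be preceded by an enter
	dispatch_async_f(q, group, do_work_then_leave);

	dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
	dispatch_release(group);
}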
Example no. 9
0
void
dispatch_suspend(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// rdar://8181908 explains why we need to do an internal retain at every
	// suspension.
	(void)dispatch_atomic_add2o(dou._do, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	_dispatch_retain(dou._do);
}
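dispatch_suspend() takes an internal retain on every suspension, which the matching dispatch_resume() in Example no. 1 releases, so the two calls must stay balanced. A minimal illustrative sketch of balanced use on a queue (not taken from the library):

#include <dispatch/dispatch.h>

static void suspend_resume_example(dispatch_queue_t q)
{
	dispatch_suspend(q); // newly submitted blocks will not start while suspended
	/* ... enqueue or reconfigure work while the queue is held ... */
	dispatch_resume(q);  // must balance the suspend above, or the queue stays stuck
}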
Example no. 10
0
DISPATCH_NOINLINE
_os_object_t
_os_object_retain(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return obj; // global object
	}
	xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed);
	if (slowpath(xref_cnt <= 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
	return obj;
}
Example no. 11
0
ssize_t IOPoller::_Poll(int timeout){
    assert(epoll_fd);

    int32_t n, mode;
    struct epoll_event *ev;

    //TODO: dedicated thread always blocks but others should not
    n = epoll_wait(epoll_fd, events, MAXEVENTS, timeout);

    if (n == -1) {
        if (errno != EINTR) {
            return (-1);
        }
        return (0);
    }

    // timeout
    if (n == 0) return 0;

    for(int i = 0; i < n; i++) {
        ev = &events[i];
        if(slowpath(ev->events == 0))
            continue;
        mode = 0;
        if(ev->events & (EPOLLIN|EPOLLRDHUP|EPOLLHUP|EPOLLERR))
            mode |= IOHandler::UT_IOREAD;
        if(ev->events & (EPOLLOUT|EPOLLHUP|EPOLLERR))
            mode |= IOHandler::UT_IOWRITE;
        if(fastpath(mode))
            ioh.PollReady(*(PollData*)ev->data.ptr, mode);
    }
    return n;
}
Example no. 12
0
void kThread::initializeMainUT(bool isDefaultKT) {
    /*
     * if defaultKT, then create a stack for mainUT.
     * kernel thread's stack is assigned to initUT.
     */
    if (slowpath(isDefaultKT)) {
        mainUT = uThread::create(defaultStackSize);
        /*
         * can't use mainUT->start as mainUT should not end up in
		 * the Ready Queue. Thus, the stack pointer should be initialized
         * directly.
         */
        mainUT->stackPointer = (vaddr) stackInit(mainUT->stackPointer, (ptr_t) uThread::invoke,
                (ptr_t) kThread::defaultRun, (void*)this, nullptr, nullptr); //Initialize the thread context
        mainUT->state = uThread::State::READY;
    } else {
        /*
		 * The default function takes over the default kernel thread's
		 * stack pointer and runs from there.
         */
        mainUT = uThread::createMainUT(*localCluster);
        currentUT = mainUT;
        mainUT->state = uThread::State::RUNNING;
    }

    //Default uThreads are not being counted
    uThread::totalNumberofUTs--;
}
Example no. 13
0
DISPATCH_NOINLINE
void
_os_object_release(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed);
	if (fastpath(xref_cnt >= 0)) {
		return;
	}
	if (slowpath(xref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return _os_object_xref_dispose(obj);
}
Example no. 14
0
dispatch_source_t
dispatch_source_create(dispatch_source_type_t type,
	uintptr_t handle,
	uintptr_t mask,
	dispatch_queue_t q)
{
	dispatch_source_t ds = NULL;
	static char source_label[sizeof(ds->dq_label)] = "source";

	// input validation
	if (type == NULL || (mask & ~type->mask)) {
		goto out_bad;
	}

	ds = calloc(1ul, sizeof(struct dispatch_source_s));
	if (slowpath(!ds)) {
		goto out_bad;
	}

	// Initialize as a queue first, then override some settings below.
	_dispatch_queue_init((dispatch_queue_t)ds);
	memcpy(ds->dq_label, source_label, sizeof(source_label));

	// Dispatch Object
	ds->do_vtable = &_dispatch_source_kevent_vtable;
	ds->do_ref_cnt++; // the reference the manager queue holds
	ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL;
	// do_targetq will be retained below, past point of no-return
	ds->do_targetq = q;

	if (slowpath(!type->init(ds, type, handle, mask, q))) {
		goto out_bad;
	}

	dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder));
#if DISPATCH_DEBUG
	dispatch_debug(as_do(ds), __FUNCTION__);
#endif

	_dispatch_retain(as_do(ds->do_targetq));
	return ds;
	
out_bad:
	free(ds);
	return NULL;
}
Example no. 15
0
void
_dispatch_dispose(dispatch_object_t dou)
{
	if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) {
		DISPATCH_CRASH("Release while enqueued");
	}
	dx_dispose(dou._do);
	return _dispatch_dealloc(dou);
}
Example no. 16
0
unsigned long
_os_object_retain_count(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return ULONG_MAX; // global object
	}
	return (unsigned long)(xref_cnt + 1); // external count is stored biased by one
}
Example no. 17
0
int IOPoller::_Close(int fd){
    struct epoll_event ev;
    int res;

    res = epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, &ev);
    // errno 2 (ENOENT) means the fd was never added or was already removed; ignore it.
    if(slowpath(res < 0 && errno != 2))
        std::cerr << "EPOLL DEL ERROR: " << errno << std::endl;
    return res;
}
Example no. 18
0
DISPATCH_ALLOC_NOINLINE
static void
_dispatch_alloc_continuation_free(dispatch_continuation_t c)
{
	bitmap_t *b, *s;
	unsigned int b_idx, idx;

	get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx);
	bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY);
	if (slowpath(s)) {
		(void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY);
	}
	// We only try to madvise(2) pages outside of the first page.
	// (Allocations in the first page do not have a supermap entry.)
	if (slowpath(bitmap_now_empty) && slowpath(s)) {
		return _dispatch_alloc_maybe_madvise_page(c);
	}
}
Example no. 19
0
void
_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr;
	do {
		kr = semaphore_wait(s4);
	} while (slowpath(kr == KERN_ABORTED));
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret;
	do {
		ret = sem_wait((sem_t *) sema);
	} while (slowpath(ret != 0));
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
Example no. 20
0
void
_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema)
{
	// Cache the semaphore in thread-specific data for later reuse.
	_dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t)
			_dispatch_thread_getspecific(dispatch_sema4_key);
	_dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema);
	if (slowpath(old_sema)) {
		// Only one semaphore is cached per thread; dispose of the extra one.
		return _dispatch_thread_semaphore_dispose(old_sema);
	}
}
Example no. 21
0
DISPATCH_NOINLINE
_os_object_t
_os_object_retain(_os_object_t obj)
{
	int xref_cnt = _os_object_xrefcnt_inc(obj);
	if (slowpath(xref_cnt <= 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
	return obj;
}
Example no. 22
0
int IOPoller::_Open(int fd, PollData &pd){
    struct epoll_event ev;
    ev.events = EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET;
    ev.data.ptr = (void*)&pd;

    int res = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev);
    if(slowpath(res < 0))
        std::cerr << "EPOLL ADD ERROR: " << errno << std::endl;
    return res;
}
Example no. 23
0
_dispatch_thread_semaphore_t
_dispatch_get_thread_semaphore(void)
{
	// Reuse the semaphore cached in thread-specific data, if any;
	// otherwise create a fresh one.
	_dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t)
			_dispatch_thread_getspecific(dispatch_sema4_key);
	if (slowpath(!sema)) {
		return _dispatch_thread_semaphore_create();
	}
	_dispatch_thread_setspecific(dispatch_sema4_key, NULL); // take ownership of the cached one
	return sema;
}
Example no. 24
0
// Base address of page, or NULL if this page shouldn't be madvise()d
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static void *
madvisable_page_base_for_continuation(dispatch_continuation_t c)
{
	if (fastpath(continuation_is_in_first_page(c))) {
		return NULL;
	}
	void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)PAGE_MASK);
#if DISPATCH_DEBUG
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
	if (slowpath(page_base < (void *)&m->conts)) {
		DISPATCH_CRASH("madvisable continuation too low");
	}
	if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1]
			[BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) {
		DISPATCH_CRASH("madvisable continuation too high");
	}
#endif
	return page_base;
}
Example no. 25
0
static void
_voucher_insert(voucher_t v)
{
	mach_voucher_t kv = v->v_ipc_kvoucher;
	if (!kv) return;
	_vouchers_lock_lock();
	if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) {
		_dispatch_voucher_debug("corruption", v);
		DISPATCH_CLIENT_CRASH(v->v_list.tqe_prev, "Voucher corruption");
	}
	TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list);
	_vouchers_lock_unlock();
}
Example no. 26
0
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}
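dispatch_semaphore_signal() only takes the slow path when a waiter has to be woken, and it crashes if the increment wraps the counter. For context, a small illustrative sketch of the matching public wait/signal pairing; the producer helper is invented for this example.

#include <dispatch/dispatch.h>

// Hypothetical producer used only in this sketch.
static void produce_one(void *ctx)
{
	dispatch_semaphore_signal((dispatch_semaphore_t)ctx); // publish one resource
}

static void semaphore_example(void)
{
	// Counting semaphore that starts with no resources available.
	dispatch_semaphore_t sema = dispatch_semaphore_create(0);

	dispatch_async_f(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
			sema, produce_one);

	// Block until the producer signals, then clean up.
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
	dispatch_release(sema);
}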
Example no. 27
0
DISPATCH_NOINLINE
void
_os_object_release(_os_object_t obj)
{
	int xref_cnt = _os_object_xrefcnt_dec(obj);
	if (fastpath(xref_cnt >= 0)) {
		return;
	}
	if (slowpath(xref_cnt < -1)) {
		_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
	}
	return _os_object_xref_dispose(obj);
}
Example no. 28
0
DISPATCH_NOINLINE
void
_os_object_release_internal(_os_object_t obj)
{
	int ref_cnt = obj->os_obj_ref_cnt;
	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return; // global object
	}
	ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
	if (fastpath(ref_cnt >= 0)) {
		return;
	}
	if (slowpath(ref_cnt < -1)) {
		DISPATCH_CRASH("Over-release of an object");
	}
#if DISPATCH_DEBUG
	if (slowpath(obj->os_obj_xref_cnt >= 0)) {
		DISPATCH_CRASH("Release while external references exist");
	}
#endif
	return _os_object_dispose(obj);
}
Example no. 29
0
bool
_os_object_retain_weak(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return true; // global object
	}
retry:
	if (slowpath(xref_cnt == -1)) {
		return false; // object already fully released; cannot retain weakly
	}
	if (slowpath(xref_cnt < -1)) {
		goto overrelease;
	}
	// Try to bump the external ref count; on failure the current value is
	// written back into xref_cnt and the checks above are repeated.
	if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt,
			xref_cnt + 1, &xref_cnt, relaxed))) {
		goto retry;
	}
	return true;
overrelease:
	_OS_OBJECT_CLIENT_CRASH("Over-release of an object");
}
Example no. 30
0
void
_dispatch_xref_dispose(dispatch_object_t dou)
{
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
#if !USE_OBJC
	if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) {
		_dispatch_source_xref_dispose(dou._ds);
	} else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) {
		_dispatch_runloop_queue_xref_dispose(dou._dq);
	}
	return _dispatch_release(dou._os_obj);
#endif
}