Example #1
static int
tegra_cpufreq_freq_helper(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int fq, oldfq = 0, error;
	uint64_t xc;

	node = *rnode;
	node.sysctl_data = &fq;

	fq = cpufreq_get_rate();
	if (rnode->sysctl_num == cpufreq_node_target)
		oldfq = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != cpufreq_node_target)
		return 0;

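	/* Serialize rate changes: proceed only if we flip cpufreq_busy from 0 to 1. */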
	if (atomic_cas_uint(&cpufreq_busy, 0, 1) != 0)
		return EBUSY;

	error = cpufreq_set_rate(fq);
	if (error == 0) {
		xc = xc_broadcast(0, tegra_cpufreq_post, NULL, NULL);
		xc_wait(xc);
		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	atomic_dec_uint(&cpufreq_busy);

	return error;
}
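The compare-and-swap on cpufreq_busy above is used as a one-shot busy flag: only the caller that flips it from 0 to 1 is allowed to change the clock rate, and atomic_dec_uint() releases the flag afterwards. A minimal sketch of the same gate pattern, assuming the NetBSD <sys/atomic.h> primitives used above (illumos declares them in <atomic.h>) and a hypothetical do_exclusive_work() callback:

#include <errno.h>
#include <sys/atomic.h>			/* atomic_cas_uint(), atomic_dec_uint() */

static volatile unsigned int busy;	/* 0 = idle, 1 = operation in progress */

static int
run_exclusive(void (*do_exclusive_work)(void))
{
	/* Only the caller that swaps 0 -> 1 proceeds; everyone else backs off. */
	if (atomic_cas_uint(&busy, 0, 1) != 0)
		return EBUSY;

	do_exclusive_work();

	/* Drop the flag so the next caller can take it. */
	atomic_dec_uint(&busy);
	return 0;
}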
Example #2
static VALUE ir_compare_and_set(volatile VALUE self, VALUE expect_value, VALUE new_value) {
#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050
    /* OSAtomicCompareAndSwap64() returns true only if the swap was performed. */
    if (OSAtomicCompareAndSwap64((int64_t) expect_value, (int64_t) new_value,
            (volatile int64_t *) &DATA_PTR(self))) {
        return Qtrue;
    }
#elif defined(__sun)
/*  Assuming VALUE is uintptr_t */
/*  Based on the definition of uintptr_t from /usr/include/sys/int_types.h */
#if defined(_LP64) || defined(_I32LPx)
    /*  64-bit: uintptr_t === unsigned long */
    /* atomic_cas_ulong() returns the previous value; equality means the swap happened. */
    if (atomic_cas_ulong((uintptr_t *) &DATA_PTR(self), expect_value, new_value) == expect_value) {
        return Qtrue;
    }
#else
    /*  32-bit: uintptr_t === unsigned int */
    /* atomic_cas_uint() likewise returns the previous value. */
    if (atomic_cas_uint((uintptr_t *) &DATA_PTR(self), expect_value, new_value) == expect_value) {
        return Qtrue;
    }
#endif
#elif defined _MSC_VER && defined _M_AMD64
    /* The Interlocked* functions return the initial value of the destination. */
    if (InterlockedCompareExchange64((LONGLONG *) &DATA_PTR(self), new_value, expect_value)
            == (LONGLONG) expect_value) {
        return Qtrue;
    }
#elif defined _MSC_VER && defined _M_IX86
    if (InterlockedCompareExchange((LONG *) &DATA_PTR(self), new_value, expect_value)
            == (LONG) expect_value) {
        return Qtrue;
    }
#else
    if (__sync_bool_compare_and_swap(&DATA_PTR(self), expect_value, new_value)) {
        return Qtrue;
    }
#endif
    return Qfalse;
}
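On compilers that provide C11 <stdatomic.h>, the whole platform ladder above collapses into a single call to atomic_compare_exchange_strong(), which reports success as a boolean instead of returning the previous value. A sketch under that assumption, with VALUE reduced to a stand-in typedef (the real definition lives in ruby.h) and cas_value() as a hypothetical helper name:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uintptr_t VALUE;	/* stand-in for the Ruby VALUE type */

static bool
cas_value(_Atomic VALUE *slot, VALUE expect_value, VALUE new_value)
{
    /*
     * Returns true only if *slot still held expect_value and was replaced
     * by new_value; on failure the observed value is written back into
     * expect_value, which this sketch simply discards.
     */
    return atomic_compare_exchange_strong(slot, &expect_value, new_value);
}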
Example #3
bool refcount_inc(struct refcount *r)
{
	unsigned int nval;
	unsigned int oval = atomic_load_uint(&r->val);

	while (true) {
		nval = oval + 1;

		/* r->val is 0, we can't do anything more. */
		if (!oval)
			return false;

		if (atomic_cas_uint(&r->val, &oval, nval))
			return true;
		/*
		 * At this point atomic_cas_uint() has updated oval to the
		 * current r->val.
		 */
	}
}
Example #4
bool refcount_dec(struct refcount *r)
{
	unsigned int nval;
	unsigned int oval = atomic_load_uint(&r->val);

	while (true) {
		assert(oval);
		nval = oval - 1;

		if (atomic_cas_uint(&r->val, &oval, nval)) {
			/*
			 * Value has been updated, if value was set to 0
			 * return true to indicate that.
			 */
			return !nval;
		}
		/*
		 * At this point atomic_cas_uint() has updated oval to the
		 * current r->val.
		 */
	}
}
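Together, these two helpers form the usual get/put reference-counting protocol: refcount_inc() refuses to revive an object whose count has already dropped to zero, and refcount_dec() tells the caller when the last reference went away. A minimal usage sketch, assuming the refcount_inc()/refcount_dec() shown above and a hypothetical struct obj that embeds the counter:

#include <stdbool.h>
#include <stdlib.h>

struct refcount {
	unsigned int val;		/* layout assumed from the examples above */
};

bool refcount_inc(struct refcount *r);	/* defined above */
bool refcount_dec(struct refcount *r);	/* defined above */

struct obj {
	struct refcount refc;
	/* ... payload ... */
};

/* Take a reference; fails if the object is already being torn down. */
static bool
obj_get(struct obj *o)
{
	return refcount_inc(&o->refc);
}

/* Drop a reference; whoever releases the last one frees the object. */
static void
obj_put(struct obj *o)
{
	if (refcount_dec(&o->refc))
		free(o);
}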
Example #5
mblk_t *
oce_ring_tx(void *ring_handle, mblk_t *mp)
{
	struct oce_wq *wq = ring_handle;
	mblk_t *nxt_pkt;
	mblk_t *rmp = NULL;
	struct oce_dev *dev = wq->parent;

	if (dev->suspended) {
		freemsg(mp);
		return (NULL);
	}
	while (mp != NULL) {
		/* Save the Pointer since mp will be freed in case of copy */
		nxt_pkt = mp->b_next;
		mp->b_next = NULL;
		/* Hardcode wq since we have only one */
		rmp = oce_send_packet(wq, mp);
		if (rmp != NULL) {
			/* restore the chain */
			rmp->b_next = nxt_pkt;
			break;
		}
		mp  = nxt_pkt;
	}

	if (wq->resched) {
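		/*
		 * Re-arm the completion queue only if we win the POLL -> INTR
		 * transition; atomic_cas_uint() returns the previous mode.
		 */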
		if (atomic_cas_uint(&wq->qmode, OCE_MODE_POLL, OCE_MODE_INTR)
		    == OCE_MODE_POLL) {
			oce_arm_cq(wq->parent, wq->cq->cq_id, 0, B_TRUE);
			wq->last_armed = ddi_get_lbolt();
		}
	}

	return (rmp);
}
Example #6
static bool atomic_cas(sp_counted_base_atomic_type volatile *pw, int old_value, int new_value)
{
    return atomic_cas_uint(&pw->ui, unsigned(old_value), unsigned(new_value)) == unsigned(old_value);
}
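The == unsigned(old_value) comparison is what turns the Solaris contract into a boolean: atomic_cas_uint() returns whatever the target held before the operation, so the swap took place exactly when that value equals the expected one. The same idea as a stand-alone C sketch, with cas_uint_succeeded() as a hypothetical name:

#include <atomic.h>		/* illumos/Solaris atomic_cas_uint() */
#include <stdbool.h>

static bool
cas_uint_succeeded(volatile unsigned int *p, unsigned int expected,
    unsigned int newval)
{
	/* The swap happened iff the value we observed was the expected one. */
	return atomic_cas_uint(p, expected, newval) == expected;
}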
Example #7
int
turnstile_block(turnstile_t *ts, int qnum, void *sobj, sobj_ops_t *sobj_ops,
    kmutex_t *mp, lwp_timer_t *lwptp)
{
	kthread_t *owner;
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	turnstile_chain_t *tc = &TURNSTILE_CHAIN(sobj);
	int error = 0;
	int loser = 0;

	ASSERT(DISP_LOCK_HELD(&tc->tc_lock));
	ASSERT(mp == NULL || IS_UPI(mp));
	ASSERT((SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) ^ (mp == NULL));

	thread_lock_high(t);

	if (ts == NULL) {
		/*
		 * This is the first thread to block on this sobj.
		 * Take its attached turnstile and add it to the hash chain.
		 */
		ts = t->t_ts;
		ts->ts_sobj = sobj;
		ts->ts_next = tc->tc_first;
		tc->tc_first = ts;
		ASSERT(ts->ts_waiters == 0);
	} else {
		/*
		 * Another thread has already donated its turnstile
		 * to block on this sobj, so ours isn't needed.
		 * Stash it on the active turnstile's freelist.
		 */
		turnstile_t *myts = t->t_ts;
		myts->ts_free = ts->ts_free;
		ts->ts_free = myts;
		t->t_ts = ts;
		ASSERT(ts->ts_sobj == sobj);
		ASSERT(ts->ts_waiters > 0);
	}

	/*
	 * Put the thread to sleep.
	 */
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		curthread->t_flag |= T_WAKEABLE;
	}
	CL_SLEEP(t);		/* assign kernel priority */
	THREAD_SLEEP(t, &tc->tc_lock);
	t->t_wchan = sobj;
	t->t_sobj_ops = sobj_ops;
	DTRACE_SCHED(sleep);

	if (lwp != NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
		if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
			lwp->lwp_asleep = 1;
			lwp->lwp_sysabort = 0;
			/*
			 * make wchan0 non-zero to conform to the rule that
			 * threads blocking for user-level objects have a
			 * non-zero wchan0: this prevents spurious wake-ups
			 * by, for example, /proc.
			 */
			t->t_wchan0 = (caddr_t)1;
		}
	}
	ts->ts_waiters++;
	sleepq_insert(&ts->ts_sleepq[qnum], t);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_MUTEX &&
	    SOBJ_OWNER(sobj_ops, sobj) == NULL)
		panic("turnstile_block(%p): unowned mutex", (void *)ts);

	/*
	 * Follow the blocking chain to its end, willing our priority to
	 * everyone who's in our way.
	 */
	while (t->t_sobj_ops != NULL &&
	    (owner = SOBJ_OWNER(t->t_sobj_ops, t->t_wchan)) != NULL) {
		if (owner == curthread) {
			if (SOBJ_TYPE(sobj_ops) != SOBJ_USER_PI) {
				panic("Deadlock: cycle in blocking chain");
			}
			/*
			 * If the cycle we've encountered ends in mp,
			 * then we know it isn't a 'real' cycle because
			 * we're going to drop mp before we go to sleep.
			 * Moreover, since we've come full circle we know
			 * that we must have willed priority to everyone
			 * in our way.  Therefore, we can break out now.
			 */
			if (t->t_wchan == (void *)mp)
				break;

			if (loser)
				lock_clear(&turnstile_loser_lock);
			/*
			 * For SOBJ_USER_PI, a cycle is an application
			 * deadlock which needs to be communicated
			 * back to the application.
			 */
			thread_unlock_nopreempt(t);
			mutex_exit(mp);
			setrun(curthread);
			swtch(); /* necessary to transition state */
			curthread->t_flag &= ~T_WAKEABLE;
			if (lwptp->lwpt_id != 0)
				(void) lwp_timer_dequeue(lwptp);
			setallwatch();
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			return (EDEADLK);
		}
		if (!turnstile_interlock(t->t_lockp, &owner->t_lockp)) {
			/*
			 * If we failed to grab the owner's thread lock,
			 * turnstile_interlock() will have dropped t's
			 * thread lock, so at this point we don't even know
			 * that 't' exists anymore.  The simplest solution
			 * is to restart the entire priority inheritance dance
			 * from the beginning of the blocking chain, since
			 * we *do* know that 'curthread' still exists.
			 * Application of priority inheritance is idempotent,
			 * so it's OK that we're doing it more than once.
			 * Note also that since we've dropped our thread lock,
			 * we may already have been woken up; if so, our
			 * t_sobj_ops will be NULL, the loop will terminate,
			 * and the call to swtch() will be a no-op.  Phew.
			 *
			 * There is one further complication: if two (or more)
			 * threads keep trying to grab the turnstile locks out
			 * of order and keep losing the race to another thread,
			 * these "dueling losers" can livelock the system.
			 * Therefore, once we get into this rare situation,
			 * we serialize all the losers.
			 */
			if (loser == 0) {
				loser = 1;
				lock_set(&turnstile_loser_lock);
			}
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		/*
		 * We now have the owner's thread lock.  If we are traversing
		 * from non-SOBJ_USER_PI ops to SOBJ_USER_PI ops, then we know
		 * that we have caught the thread while in the TS_SLEEP state,
		 * but holding mp.  We know that this situation is transient
		 * (mp will be dropped before the holder actually sleeps on
		 * the SOBJ_USER_PI sobj), so we will spin waiting for mp to
		 * be dropped.  Then, as in the turnstile_interlock() failure
		 * case, we will restart the priority inheritance dance.
		 */
		if (SOBJ_TYPE(t->t_sobj_ops) != SOBJ_USER_PI &&
		    owner->t_sobj_ops != NULL &&
		    SOBJ_TYPE(owner->t_sobj_ops) == SOBJ_USER_PI) {
			kmutex_t *upi_lock = (kmutex_t *)t->t_wchan;

			ASSERT(IS_UPI(upi_lock));
			ASSERT(SOBJ_TYPE(t->t_sobj_ops) == SOBJ_MUTEX);

			if (t->t_lockp != owner->t_lockp)
				thread_unlock_high(owner);
			thread_unlock_high(t);
			if (loser)
				lock_clear(&turnstile_loser_lock);

			while (mutex_owner(upi_lock) == owner) {
				SMT_PAUSE();
				continue;
			}

			if (loser)
				lock_set(&turnstile_loser_lock);
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		turnstile_pi_inherit(t->t_ts, owner, DISP_PRIO(t));
		if (t->t_lockp != owner->t_lockp)
			thread_unlock_high(t);
		t = owner;
	}

	if (loser)
		lock_clear(&turnstile_loser_lock);

	/*
	 * Note: 't' and 'curthread' were synonymous before the loop above,
	 * but now they may be different.  ('t' is now the last thread in
	 * the blocking chain.)
	 */
	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		ushort_t s = curthread->t_oldspl;
		int timedwait = 0;
		uint_t imm_timeout = 0;
		clock_t tim = -1;

		thread_unlock_high(t);
		if (lwptp->lwpt_id != 0) {
			/*
			 * We enqueued a timeout.  If it has already fired,
			 * lwptp->lwpt_imm_timeout has been set with cas,
			 * so fetch it with cas as well: a cas of 0 to 0
			 * never changes the value, it just returns the
			 * current contents atomically.
			 */
			timedwait = 1;
			imm_timeout =
			    atomic_cas_uint(&lwptp->lwpt_imm_timeout, 0, 0);
		}
		mutex_exit(mp);
		splx(s);

		if (ISSIG(curthread, JUSTLOOKING) ||
		    MUSTRETURN(p, curthread) || imm_timeout)
			setrun(curthread);
		swtch();
		curthread->t_flag &= ~T_WAKEABLE;
		if (timedwait)
			tim = lwp_timer_dequeue(lwptp);
		setallwatch();
		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(p, curthread))
			error = EINTR;
		else if (imm_timeout || (timedwait && tim == -1))
			error = ETIME;
		lwp->lwp_sysabort = 0;
		lwp->lwp_asleep = 0;
	} else {
		thread_unlock_nopreempt(t);
		swtch();
	}

	return (error);
}