Example #1
void mcs_rwlock::downgrade() 
{
    membar_exit();  // this is for all intents and purposes, a release
    w_assert1(*&_holders == WRITER);
    *&_holders = READER;
    membar_enter(); // but it's also an acquire
}
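The pairing is easier to see when spelled out with C11 fences. The fragment below is a stand-alone sketch, not part of mcs_rwlock: `holders`, `WRITER` and `READER` only mirror the member and constants used above, and the encodings are placeholders.

#include <stdatomic.h>

enum { WRITER = 1, READER = 2 };              /* placeholder encodings, not the real ones */
static atomic_uint holders = WRITER;          /* stand-in for _holders */

static void downgrade_sketch(void)
{
    atomic_thread_fence(memory_order_release);                 /* ~ membar_exit()  */
    atomic_store_explicit(&holders, READER, memory_order_relaxed);
    atomic_thread_fence(memory_order_acquire);                 /* ~ membar_enter() */
}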
Example #2
void mcs_rwlock::release_read() 
{
    w_assert2(has_reader());
    membar_exit(); // release barrier: make this thread's protected stores visible,
                   // and complete its earlier loads, before the releasing write below
    atomic_add_32(&_holders, -READER);
}
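This release barrier only helps if the writer side pairs it with an acquire. The real mcs_rwlock writer path is queue-based and not shown in this collection; the fragment below is a hedged, stand-alone sketch that merely shows where the matching membar_enter() belongs once the writer has atomically claimed the lock word (`holders` and `WRITER` are stand-ins).

#include <stdint.h>
#include <sys/atomic.h>       /* atomic_cas_32(), membar_enter(); Solaris/NetBSD-style API assumed */

enum { WRITER = 1 };                /* placeholder encoding */
static volatile uint32_t holders;   /* stand-in for _holders */

static void claim_write_sketch(void)
{
    /* spin until the reader count is zero and we atomically install WRITER */
    while (atomic_cas_32(&holders, 0, WRITER) != 0)
        continue;
    membar_enter();           /* acquire: pairs with membar_exit() in release_read() */
    /* stores made by the readers inside the lock are now guaranteed visible */
}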
Example #3
/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue a memory barrier so that earlier accesses cannot be
		 * reordered past the assignment.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
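The membar_exit() above is a publish-with-release: stores made under the old CPU's lock must be visible before the new cc pointer is. The consumer of that pointer is callout_lock(), which chases c_cpu, takes the corresponding lock and re-checks the pointer. The routine below is only a rough sketch of that pattern, reusing the types from this example; cc_lock is an assumed field name, and the real NetBSD code differs in detail.

/* Rough sketch of the consumer side: lock whichever CPU the callout
 * currently points at, then re-check, because c_cpu may have been
 * republished (after membar_exit()) while we were waiting for the lock. */
static kmutex_t *
callout_lock_sketch(callout_impl_t *c)
{
	struct callout_cpu *cc;
	kmutex_t *lock;

	for (;;) {
		cc = c->c_cpu;
		lock = cc->cc_lock;
		mutex_spin_enter(lock);		/* the lock acquire provides the acquire barrier */
		if (cc == c->c_cpu)		/* still the owning CPU? */
			return lock;
		mutex_spin_exit(lock);		/* raced with a re-bind; retry */
	}
}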
Example #4
static void
shmif_unlockbus(struct shmif_mem *busmem)
{
	unsigned int old;

	membar_exit();	/* release: make bus-memory stores visible before dropping the lock */
	old = atomic_swap_32(&busmem->shm_lock, LOCK_UNLOCKED);
	KASSERT(old == LOCK_LOCKED);
}
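Unlocking is only half of the protocol. A matching lock routine spins with a compare-and-swap and then issues the acquire barrier; the sketch below is a deliberately simplified, hypothetical version (the real shmif_lockbus also backs off and yields), reusing shm_lock, LOCK_LOCKED and LOCK_UNLOCKED from above.

#include <sys/atomic.h>           /* atomic_cas_32() and membar_enter() assumed available */

static void
shmif_lockbus_sketch(struct shmif_mem *busmem)
{
	/* spin until we move the lock word from UNLOCKED to LOCKED */
	while (atomic_cas_32(&busmem->shm_lock,
	    LOCK_UNLOCKED, LOCK_LOCKED) != LOCK_UNLOCKED)
		continue;
	membar_enter();           /* acquire: pairs with membar_exit() in shmif_unlockbus() */
}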
Example #5
void
rump_unschedule_cpu1(struct lwp *l, void *interlock)
{
    struct rumpcpu *rcpu;
    struct cpu_info *ci;
    void *old;

    ci = l->l_cpu;
    ci->ci_curlwp = ci->ci_data.cpu_onproc = NULL;
    rcpu = cpuinfo_to_rumpcpu(ci);

    KASSERT(rcpu->rcpu_ci == ci);

    /*
     * Make sure all stores are seen before the CPU release.  This
     * is relevant only in the non-fastpath scheduling case, but
     * we don't know here if that's going to happen, so we need to
     * expect the worst.
     *
     * If the scheduler interlock was requested by the caller, we
     * need to obtain it before we release the CPU.  Otherwise, we risk a
     * race condition where another thread is scheduled onto the
     * rump kernel CPU before our current thread can
     * grab the interlock.
     */
    if (interlock == rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
    else
        membar_exit();

    /* Release the CPU. */
    old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, l);

    /* No waiters?  No problems.  We're outta here. */
    if (old == RCPULWP_BUSY) {
        return;
    }

    KASSERT(old == RCPULWP_WANTED);

    /*
     * Ok, things weren't so snappy.
     *
     * Snailpath: take lock and signal anyone waiting for this CPU.
     */

    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
    if (rcpu->rcpu_wanted)
        rumpuser_cv_broadcast(rcpu->rcpu_cv);
    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_exit(rcpu->rcpu_mtx);
}
Example #6
void
mtx_leave(struct mutex *mtx)
{
	int s;

	MUTEX_ASSERT_LOCKED(mtx);

#ifdef MULTIPROCESSOR
	membar_exit();	/* release: order critical-section stores before mtx_owner is cleared */
#endif
#ifdef DIAGNOSTIC
	curcpu()->ci_mutex_level--;
#endif

	s = mtx->mtx_oldipl;
	mtx->mtx_owner = NULL;
	if (mtx->mtx_wantipl != IPL_NONE)
		splx(s);
}
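The #ifdef'd membar_exit() is the release half of this mutex; on the way in, the matching barrier belongs right after the owner field is claimed. The routine below is a hedged sketch of that acquire half, assuming an atomic_cas_ptr() primitive is available; OpenBSD's real mtx_enter additionally handles IPL and spins under contention.

/* Hedged sketch of the acquire half: claim mtx_owner with a CAS, then issue
 * the acquire barrier so later accesses cannot move above the claim.
 * Deliberately simplified: no IPL handling, no spinning, no diagnostics. */
static int
mtx_enter_try_sketch(struct mutex *mtx)
{
	struct cpu_info *ci = curcpu();

	if (atomic_cas_ptr(&mtx->mtx_owner, NULL, ci) == NULL) {
#ifdef MULTIPROCESSOR
		membar_enter();	/* acquire: pairs with membar_exit() in mtx_leave() */
#endif
		return 1;	/* lock taken */
	}
	return 0;		/* owned by someone else */
}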
Example #7
void block_bits::release_contiguous(size_t index, size_t chip_count) {
    // assign this chip to the zombie set for later recycling
    (void) chip_count; // keep gcc happy
    assert(index < chip_count);
    bitmap to_free = bitmap(1) << index;
    assert(! (to_free & *usable_chips()));
    membar_exit(); // release: make prior writes to the freed chip visible before it is published as a zombie
    bitmap volatile* ptr = &_zombie_chips;
    bitmap ov = *ptr;
    while(1) {
        bitmap nv = ov | to_free;
        bitmap cv = atomic_cas_64(ptr, ov, nv);
        if(cv == ov)
            break;
        ov = cv;
    }
    bitmap was_free = ov;
    (void) was_free; // keep gcc happy

    assert( ! (was_free & to_free));
}
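The hand-rolled CAS loop above is an atomic OR into _zombie_chips, performed after a release barrier. With C11 atomics the same publish collapses to a fence plus fetch_or; the snippet below is a stand-alone sketch under that assumption, with `zombie_chips` standing in for the member.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t bitmap;                    /* mirrors the class's bitmap type */
static _Atomic bitmap zombie_chips;         /* stand-in for _zombie_chips */

static void release_chip_sketch(size_t index)
{
    bitmap to_free = (bitmap)1 << index;
    atomic_thread_fence(memory_order_release);      /* ~ membar_exit() */
    atomic_fetch_or_explicit(&zombie_chips, to_free,
        memory_order_relaxed);                      /* replaces the hand-rolled CAS loop */
}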
Example #8
void block_bits::recycle() {
    /* recycle the zombies in the block.

       Whatever bits have gone zombie since we last recycled become
       OR-ed into the set of usable bits. We also XOR them atomically back
       into the zombie set to clear them out there. That way we don't
       leak bits if a releasing thread races us and adds more bits to the
       zombie set after we read it.
    */
    bitmap newly_usable = *&_zombie_chips;
    _usable_chips |= newly_usable;
    membar_exit(); // order the _usable_chips update before these bits are cleared from _zombie_chips
    bitmap volatile* ptr = &_zombie_chips;
    bitmap ov = *ptr;
    while(1) {
        bitmap nv = ov ^ newly_usable; // XOR
        bitmap cv = atomic_cas_64(ptr, ov, nv);
        if(cv == ov)
            break;
        ov = cv;
    }
}
Example #9
void atomicSet(volatile atomic_t& val, int n)
{
    membar_exit(); // release barrier: earlier stores become visible before the store below
    val.l = n;
}
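atomicSet() is the producer half of a publish: earlier stores are ordered before the assignment by membar_exit(). A consumer would read the value and then issue the load-side barrier of the same family before touching data published ahead of the set. The getter below is hypothetical (no matching accessor appears in this collection) and is written C-style with a pointer instead of the C++ reference.

/* hypothetical consumer-side counterpart; assumes the same atomic_t type
 * and barrier headers as atomicSet() above */
int atomicGet(volatile atomic_t *val)
{
    int n = val->l;      /* read the published value */
    membar_consumer();   /* order this load before any later loads of data
                          * that was published before the atomicSet()      */
    return n;
}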
Example #10
void mcs_rwlock::release_write() {
    membar_exit(); // release barrier: flush protected, modified data before releasing the lock below
    w_assert1(*&_holders == WRITER);
    *&_holders = 0;
}